Changeset View
Standalone View
release/scripts/freestyle/modules/parameter_editor.py
| Context not available. | |||||
| UnaryPredicate0D, | UnaryPredicate0D, | ||||
| UnaryPredicate1D, | UnaryPredicate1D, | ||||
| TVertex, | TVertex, | ||||
| Material, | |||||
| ViewEdge, | |||||
| ) | ) | ||||
| from freestyle.chainingiterators import ( | from freestyle.chainingiterators import ( | ||||
| ChainPredicateIterator, | ChainPredicateIterator, | ||||
| Context not available. | |||||
| ) | ) | ||||
| from freestyle.functions import ( | from freestyle.functions import ( | ||||
| Curvature2DAngleF0D, | Curvature2DAngleF0D, | ||||
| CurveMaterialF0D, | |||||
| Normal2DF0D, | Normal2DF0D, | ||||
| QuantitativeInvisibilityF1D, | QuantitativeInvisibilityF1D, | ||||
| VertexOrientation2DF0D, | VertexOrientation2DF0D, | ||||
| CurveMaterialF0D, | |||||
| ) | ) | ||||
| from freestyle.predicates import ( | from freestyle.predicates import ( | ||||
| AndUP1D, | AndUP1D, | ||||
| Context not available. | |||||
| pyBluePrintCirclesShader, | pyBluePrintCirclesShader, | ||||
| pyBluePrintEllipsesShader, | pyBluePrintEllipsesShader, | ||||
| pyBluePrintSquaresShader, | pyBluePrintSquaresShader, | ||||
| RoundCapShader, | |||||
| SquareCapShader, | |||||
| ) | ) | ||||
| from freestyle.utils import ( | from freestyle.utils import ( | ||||
| ContextFunctions, | ContextFunctions, | ||||
| getCurrentScene, | getCurrentScene, | ||||
| stroke_normal, | stroke_normal, | ||||
| bound, | |||||
| pairwise, | |||||
| iter_distance_along_stroke, | |||||
| get_material_value, | |||||
| iter_t2d_along_stroke, | |||||
| iter_distance_from_camera, | |||||
| iter_distance_from_object | |||||
| ) | ) | ||||
| from _freestyle import ( | from _freestyle import ( | ||||
| blendRamp, | blendRamp, | ||||
| evaluateColorRamp, | evaluateColorRamp, | ||||
| evaluateCurveMappingF, | evaluateCurveMappingF, | ||||
| ) | ) | ||||
| import math | |||||
| import mathutils | |||||
| import time | import time | ||||
| from mathutils import Vector | |||||
from math import pi, sin, cos, acos, radians
from itertools import cycle, tee
# namedtuple lives in the collections module, not functools
# (fix: was "from functools import namedtuple", which raises ImportError)
from collections import namedtuple

# Named-tuple primitives used for storing (min, max, delta) triples.
# NOTE(review): the three types are structurally identical and could be unified
# under one general name (e.g. 'Bound'); kept distinct so call sites stay
# self-documenting (thickness vs. distance range vs. mapped value).
Thickness = namedtuple("Thickness", ["min", "max", "delta"])
Range = namedtuple("Range", ["min", "max", "delta"])
Value = namedtuple("Value", ["min", "max", "delta"])
kjym3: I guess we could unify these named tuples by a more general name, e.g. 'Bound' or 'Limit'. | |||||
class ColorRampModifier(StrokeShader):
    """Primitive for the color modifiers."""
    def __init__(self, blend, influence, ramp):
        StrokeShader.__init__(self)
        self.blend = blend
        self.influence = influence
        self.ramp = ramp

    def evaluate(self, t):
        """Sample the ramp at parameter t; alpha is omitted."""
        sampled = evaluateColorRamp(self.ramp, t)
        return sampled.xyz  # omit alpha

    def blend_ramp(self, a, b):
        """Blend color b over color a using the configured mode and influence."""
        return blendRamp(self.blend, a, self.influence, b)
class ScalarBlendModifier(StrokeShader):
    """Primitive for alpha and thickness modifiers."""
    def __init__(self, blend_type, influence):
        StrokeShader.__init__(self)
        self.blend_type = blend_type
        self.influence = influence

    def blend(self, v1, v2):
        """Combine scalar v2 into v1 according to the blend type and influence.

        Raises ValueError for an unrecognized blend type.
        """
        fac = self.influence
        facm = 1.0 - fac
        blend_type = self.blend_type
        if blend_type == 'MIX':
            v1 = facm * v1 + fac * v2
        elif blend_type == 'ADD':
            v1 += fac * v2
        elif blend_type == 'MULTIPLY':
            v1 *= facm + fac * v2
        elif blend_type == 'SUBTRACT':
            v1 -= fac * v2
        elif blend_type == 'DIVIDE':
            # guard against division by zero: leave v1 untouched in that case
            v1 = facm * v1 + fac * v1 / v2 if v2 != 0.0 else v1
        elif blend_type == 'DIFFERENCE':
            v1 = facm * v1 + fac * abs(v1 - v2)
        elif blend_type == 'MININUM':
            # NOTE(review): 'MININUM' misspelling kept on purpose — presumably it
            # matches the enum identifier supplied by callers; verify before renaming.
            v1 = min(fac * v2, v1)
        elif blend_type == 'MAXIMUM':
            v1 = max(fac * v2, v1)
        else:
            raise ValueError("unknown curve blend type: " + self.blend_type)
        return v1
| Context not available. | |||||
| def __init__(self, blend, influence, mapping, invert, curve): | def __init__(self, blend, influence, mapping, invert, curve): | ||||
| ScalarBlendModifier.__init__(self, blend, influence) | ScalarBlendModifier.__init__(self, blend, influence) | ||||
| assert mapping in {'LINEAR', 'CURVE'} | assert mapping in {'LINEAR', 'CURVE'} | ||||
| self.__mapping = getattr(self, mapping) | self.evaluate = getattr(self, mapping) | ||||
| self.__invert = invert | self.mapping_type = mapping | ||||
kjym3Unsubmitted Not Done Inline ActionsI believe self.mapping_type is not necessary. kjym3: I believe `self.mapping_type` is not necessary. | |||||
| self.__curve = curve | self.invert = invert | ||||
| self.curve = curve | |||||
| def LINEAR(self, t): | def LINEAR(self, t): | ||||
| if self.__invert: | return (1.0 - t) if self.invert else t | ||||
| return 1.0 - t | |||||
| return t | |||||
| def CURVE(self, t): | def CURVE(self, t): | ||||
| return evaluateCurveMappingF(self.__curve, 0, t) | return evaluateCurveMappingF(self.curve, 0, t) | ||||
| def evaluate(self, t): | |||||
| return self.__mapping(t) | |||||
| class ThicknessModifierMixIn: | class ThicknessModifierMixIn: | ||||
| def __init__(self): | def __init__(self): | ||||
| scene = getCurrentScene() | scene = getCurrentScene() | ||||
| self.__persp_camera = (scene.camera.data.type == 'PERSP') | self.persp_camera = (scene.camera.data.type == 'PERSP') | ||||
| def set_thickness(self, sv, outer, inner): | def set_thickness(self, sv, outer, inner): | ||||
| fe = sv.first_svertex.get_fedge(sv.second_svertex) | fe = sv.first_svertex.get_fedge(sv.second_svertex) | ||||
| nature = fe.nature | nature = fe.nature | ||||
| if (nature & Nature.BORDER): | if (nature & Nature.BORDER): | ||||
| if self.__persp_camera: | if self.persp_camera: | ||||
| point = -sv.point_3d.copy() | point = -sv.point_3d.normalized() | ||||
| point.normalize() | |||||
| dir = point.dot(fe.normal_left) | dir = point.dot(fe.normal_left) | ||||
| else: | else: | ||||
| dir = fe.normal_left.z | dir = fe.normal_left.z | ||||
| Context not available. | |||||
class ThicknessBlenderMixIn(ThicknessModifierMixIn):
    """Mix-in adding position/ratio-aware blending of stroke thickness."""
    def __init__(self, position, ratio):
        ThicknessModifierMixIn.__init__(self)
        # fix: was the duplicated "self.position = self.position = position"
        self.position = position
        self.ratio = ratio

    def blend_thickness(self, outer, inner, v):
        """Blend v with the total thickness, then split the result into
        (outer, inner) according to the thickness position.

        Raises ValueError for an unknown thickness position.
        """
        v = self.blend(outer + inner, v)
        if self.position == 'CENTER':
            outer = v * 0.5
            inner = v - outer
        elif self.position == 'INSIDE':
            outer = 0
            inner = v
        elif self.position == 'OUTSIDE':
            outer = v
            inner = 0
        elif self.position == 'RELATIVE':
            outer = v * self.ratio
            inner = v - outer
        else:
            raise ValueError("unknown thickness position: " + self.position)
        return outer, inner

    def blend_set_thickness(self, svert, v):
        """Chains blend_thickness and set_thickness together.

        This saves function calls and is generally more efficient, as both
        steps operate on almost the same arguments.
        """
        outer, inner = svert.attribute.thickness
        fe = svert.fedge
        # Part 1: blend (delegated to blend_thickness instead of an inline copy;
        # also fixes the raise that referenced an undefined local "position")
        outer, inner = self.blend_thickness(outer, inner, v)
        # Part 2: set — orient the (outer, inner) pair to the visible side
        if (fe.nature & Nature.BORDER):
            if self.persp_camera:
                point = -svert.point_3d.normalized()
                dir = point.dot(fe.normal_left)
            else:
                dir = fe.normal_left.z
            if dir < 0.0:  # the back side is visible
                outer, inner = inner, outer
        elif (fe.nature & Nature.SILHOUETTE):
            if fe.is_smooth:  # TODO more tests needed
                outer, inner = inner, outer
        else:
            outer = inner = (outer + inner) / 2
        svert.attribute.thickness = (outer, inner)
| class BaseThicknessShader(StrokeShader, ThicknessModifierMixIn): | class BaseThicknessShader(StrokeShader, ThicknessModifierMixIn): | ||||
| Context not available. | |||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| ThicknessModifierMixIn.__init__(self) | ThicknessModifierMixIn.__init__(self) | ||||
| if position == 'CENTER': | if position == 'CENTER': | ||||
| self.__outer = thickness * 0.5 | self.outer = thickness * 0.5 | ||||
| self.__inner = thickness - self.__outer | self.inner = thickness - self.outer | ||||
| elif position == 'INSIDE': | elif position == 'INSIDE': | ||||
| self.__outer = 0 | self.outer = 0 | ||||
| self.__inner = thickness | self.inner = thickness | ||||
| elif position == 'OUTSIDE': | elif position == 'OUTSIDE': | ||||
| self.__outer = thickness | self.outer = thickness | ||||
| self.__inner = 0 | self.inner = 0 | ||||
| elif position == 'RELATIVE': | elif position == 'RELATIVE': | ||||
| self.__outer = thickness * ratio | self.outer = thickness * ratio | ||||
| self.__inner = thickness - self.__outer | self.inner = thickness - self.outer | ||||
| else: | else: | ||||
| raise ValueError("unknown thickness position: " + self.position) | raise ValueError("unknown thickness position: " + position) | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | self.set_thickness(svert, self.outer, self.inner) | ||||
| sv = it.object | |||||
| self.set_thickness(sv, self.__outer, self.__inner) | |||||
| it.increment() | |||||
| # Along Stroke modifiers | # Along Stroke modifiers | ||||
| def iter_t2d_along_stroke(stroke): | |||||
| total = stroke.length_2d | |||||
| distance = 0.0 | |||||
| it = stroke.stroke_vertices_begin() | |||||
| prev = it.object.point | |||||
| while not it.is_end: | |||||
| p = it.object.point | |||||
| distance += (prev - p).length | |||||
| prev = p.copy() # need a copy because the point can be altered | |||||
| t = min(distance / total, 1.0) if total > 0.0 else 0.0 | |||||
| yield it, t | |||||
| it.increment() | |||||
class ColorAlongStrokeShader(ColorRampModifier):
    """Maps a ramp to the color of the stroke, using the curvilinear abscissa (t)."""
    def shade(self, stroke):
        for svert, t in zip(stroke, iter_t2d_along_stroke(stroke)):
            current = svert.attribute.color
            sampled = self.evaluate(t)
            svert.attribute.color = self.blend_ramp(current, sampled)
class AlphaAlongStrokeShader(CurveMappingModifier):
    """Maps a curve to the alpha/transparency of the stroke, using the curvilinear abscissa (t)."""
    def shade(self, stroke):
        for svert, t in zip(stroke, iter_t2d_along_stroke(stroke)):
            svert.attribute.alpha = self.blend(svert.attribute.alpha, self.evaluate(t))
class ThicknessAlongStrokeShader(ThicknessBlenderMixIn, CurveMappingModifier):
    """Maps a curve to the thickness of the stroke, using the curvilinear abscissa (t)."""
    def __init__(self, thickness_position, thickness_ratio,
                 blend, influence, mapping, invert, curve, value_min, value_max):
        ThicknessBlenderMixIn.__init__(self, thickness_position, thickness_ratio)
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        # pre-compute the value span so shade() does not re-derive it per vertex
        self.value = Value(value_min, value_max, value_max - value_min)

    def shade(self, stroke):
        for svert, t in zip(stroke, iter_t2d_along_stroke(stroke)):
            thickness = self.value.min + self.evaluate(t) * self.value.delta
            self.blend_set_thickness(svert, thickness)
| # Distance from Camera modifiers | |||||
| def iter_distance_from_camera(stroke, range_min, range_max): | |||||
| normfac = range_max - range_min # normalization factor | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| p = it.object.point_3d # in the camera coordinate | |||||
| distance = p.length | |||||
| if distance < range_min: | |||||
| t = 0.0 | |||||
| elif distance > range_max: | |||||
| t = 1.0 | |||||
| else: | |||||
| t = (distance - range_min) / normfac | |||||
| yield it, t | |||||
| it.increment() | |||||
| # -- Distance from Camera modifiers -- # | |||||
class ColorDistanceFromCameraShader(ColorRampModifier):
    """Picks a color value from a ramp based on the vertex' distance from the camera."""
    def __init__(self, blend, influence, ramp, range_min, range_max):
        ColorRampModifier.__init__(self, blend, influence, ramp)
        self.range = Range(range_min, range_max, range_max - range_min)

    def shade(self, stroke):
        # Range unpacks as (min, max, delta)
        for svert, t in iter_distance_from_camera(stroke, *self.range):
            svert.attribute.color = self.blend_ramp(svert.attribute.color, self.evaluate(t))
class AlphaDistanceFromCameraShader(CurveMappingModifier):
    """Picks an alpha value from a curve based on the vertex' distance from the camera."""
    def __init__(self, blend, influence, mapping, invert, curve, range_min, range_max):
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        self.range = Range(range_min, range_max, range_max - range_min)

    def shade(self, stroke):
        # Range unpacks as (min, max, delta)
        for svert, t in iter_distance_from_camera(stroke, *self.range):
            svert.attribute.alpha = self.blend(svert.attribute.alpha, self.evaluate(t))
class ThicknessDistanceFromCameraShader(ThicknessBlenderMixIn, CurveMappingModifier):
    """Picks a thickness value from a curve based on the vertex' distance from the camera."""
    def __init__(self, thickness_position, thickness_ratio,
                 blend, influence, mapping, invert, curve, range_min, range_max, value_min, value_max):
        ThicknessBlenderMixIn.__init__(self, thickness_position, thickness_ratio)
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        self.range = Range(range_min, range_max, range_max - range_min)
        self.value = Value(value_min, value_max, value_max - value_min)

    def shade(self, stroke):
        for svert, t in iter_distance_from_camera(stroke, *self.range):
            thickness = self.value.min + self.evaluate(t) * self.value.delta
            self.blend_set_thickness(svert, thickness)
| # Distance from Object modifiers | # Distance from Object modifiers | ||||
| def iter_distance_from_object(stroke, object, range_min, range_max): | |||||
| scene = getCurrentScene() | |||||
| mv = scene.camera.matrix_world.copy() # model-view matrix | |||||
| mv.invert() | |||||
| loc = mv * object.location # loc in the camera coordinate | |||||
| normfac = range_max - range_min # normalization factor | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| p = it.object.point_3d # in the camera coordinate | |||||
| distance = (p - loc).length | |||||
| if distance < range_min: | |||||
| t = 0.0 | |||||
| elif distance > range_max: | |||||
| t = 1.0 | |||||
| else: | |||||
| t = (distance - range_min) / normfac | |||||
| yield it, t | |||||
| it.increment() | |||||
class ColorDistanceFromObjectShader(ColorRampModifier):
    """Picks a color value from a ramp based on the vertex' distance from a given object."""
    def __init__(self, blend, influence, ramp, target, range_min, range_max):
        ColorRampModifier.__init__(self, blend, influence, ramp)
        if target is None:
            raise ValueError("ColorDistanceFromObjectShader: target can't be None ")
        self.range = Range(range_min, range_max, range_max - range_min)
        # construct a model-view matrix
        matrix = getCurrentScene().camera.matrix_world.inverted()
        # get the object location in the camera coordinate
        self.loc = matrix * target.location

    def shade(self, stroke):
        distances = iter_distance_from_object(stroke, self.loc, *self.range)
        for svert, t in distances:
            new_color = self.blend_ramp(svert.attribute.color, self.evaluate(t))
            svert.attribute.color = new_color
class AlphaDistanceFromObjectShader(CurveMappingModifier):
    """Picks an alpha value from a curve based on the vertex' distance from a given object."""
    def __init__(self, blend, influence, mapping, invert, curve, target, range_min, range_max):
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        if target is None:
            raise ValueError("AlphaDistanceFromObjectShader: target can't be None ")
        self.range = Range(range_min, range_max, range_max - range_min)
        # construct a model-view matrix
        matrix = getCurrentScene().camera.matrix_world.inverted()
        # get the object location in the camera coordinate
        self.loc = matrix * target.location

    def shade(self, stroke):
        distances = iter_distance_from_object(stroke, self.loc, *self.range)
        for svert, t in distances:
            svert.attribute.alpha = self.blend(svert.attribute.alpha, self.evaluate(t))
class ThicknessDistanceFromObjectShader(ThicknessBlenderMixIn, CurveMappingModifier):
    """Picks a thickness value from a curve based on the vertex' distance from a given object."""
    def __init__(self, thickness_position, thickness_ratio,
                 blend, influence, mapping, invert, curve, target, range_min, range_max, value_min, value_max):
        ThicknessBlenderMixIn.__init__(self, thickness_position, thickness_ratio)
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        if target is None:
            raise ValueError("ThicknessDistanceFromObjectShader: target can't be None ")
        self.range = Range(range_min, range_max, range_max - range_min)
        self.value = Value(value_min, value_max, value_max - value_min)
        # construct a model-view matrix
        matrix = getCurrentScene().camera.matrix_world.inverted()
        # get the object location in the camera coordinate
        self.loc = matrix * target.location

    def shade(self, stroke):
        distances = iter_distance_from_object(stroke, self.loc, *self.range)
        for svert, t in distances:
            thickness = self.value.min + self.evaluate(t) * self.value.delta
            self.blend_set_thickness(svert, thickness)
| # Material modifiers | # Material modifiers | ||||
| def iter_material_color(stroke, material_attribute): | |||||
| func = CurveMaterialF0D() | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| material = func(Interface0DIterator(it)) | |||||
| if material_attribute == 'DIFF': | |||||
| color = material.diffuse[0:3] | |||||
| elif material_attribute == 'SPEC': | |||||
| color = material.specular[0:3] | |||||
| else: | |||||
| raise ValueError("unexpected material attribute: " + material_attribute) | |||||
| yield it, color | |||||
| it.increment() | |||||
| def iter_material_value(stroke, material_attribute): | |||||
| func = CurveMaterialF0D() | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| material = func(Interface0DIterator(it)) | |||||
| if material_attribute == 'DIFF': | |||||
| r, g, b = material.diffuse[0:3] | |||||
| t = 0.35 * r + 0.45 * g + 0.2 * b | |||||
| elif material_attribute == 'DIFF_R': | |||||
| t = material.diffuse[0] | |||||
| elif material_attribute == 'DIFF_G': | |||||
| t = material.diffuse[1] | |||||
| elif material_attribute == 'DIFF_B': | |||||
| t = material.diffuse[2] | |||||
| elif material_attribute == 'SPEC': | |||||
| r, g, b = material.specular[0:3] | |||||
| t = 0.35 * r + 0.45 * g + 0.2 * b | |||||
| elif material_attribute == 'SPEC_R': | |||||
| t = material.specular[0] | |||||
| elif material_attribute == 'SPEC_G': | |||||
| t = material.specular[1] | |||||
| elif material_attribute == 'SPEC_B': | |||||
| t = material.specular[2] | |||||
| elif material_attribute == 'SPEC_HARDNESS': | |||||
| t = material.shininess | |||||
| elif material_attribute == 'ALPHA': | |||||
| t = material.diffuse[3] | |||||
| else: | |||||
| raise ValueError("unexpected material attribute: " + material_attribute) | |||||
| yield it, t | |||||
| it.increment() | |||||
class ColorMaterialShader(ColorRampModifier):
    """Assigns a color to the vertices based on their underlying material."""
    def __init__(self, blend, influence, ramp, material_attribute, use_ramp):
        ColorRampModifier.__init__(self, blend, influence, ramp)
        self.attribute = material_attribute
        self.use_ramp = use_ramp
        self.func = CurveMaterialF0D()

    # `attributes` is a read-only default (never mutated), used only for membership tests
    def shade(self, stroke, attributes={'DIFF', 'SPEC'}):
        it = Interface0DIterator(stroke)
        if not self.use_ramp and self.attribute in attributes:
            # direct material color: diffuse or specular RGB, no ramp lookup
            for svert in it:
                material = self.func(it)
                if self.attribute == 'DIFF':
                    sampled = material.diffuse[0:3]
                else:
                    sampled = material.specular[0:3]
                svert.attribute.color = self.blend_ramp(svert.attribute.color, sampled)
        else:
            # scalar material value mapped through the ramp
            for svert in it:
                t = get_material_value(self.func(it), self.attribute)
                sampled = self.evaluate(t)
                svert.attribute.color = self.blend_ramp(svert.attribute.color, sampled)
class AlphaMaterialShader(CurveMappingModifier):
    """Assigns an alpha value to the vertices based on their underlying material."""
    def __init__(self, blend, influence, mapping, invert, curve, material_attribute):
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        self.attribute = material_attribute
        self.func = CurveMaterialF0D()

    def shade(self, stroke):
        it = Interface0DIterator(stroke)
        for svert in it:
            # the iterator doubles as the 0D input of the material functor
            t = get_material_value(self.func(it), self.attribute)
            svert.attribute.alpha = self.blend(svert.attribute.alpha, self.evaluate(t))
class ThicknessMaterialShader(ThicknessBlenderMixIn, CurveMappingModifier):
    """Assigns a thickness value to the vertices based on their underlying material."""
    def __init__(self, thickness_position, thickness_ratio,
                 blend, influence, mapping, invert, curve, material_attribute, value_min, value_max):
        ThicknessBlenderMixIn.__init__(self, thickness_position, thickness_ratio)
        CurveMappingModifier.__init__(self, blend, influence, mapping, invert, curve)
        self.attribute = material_attribute
        self.value = Value(value_min, value_max, value_max - value_min)
        self.func = CurveMaterialF0D()

    def shade(self, stroke):
        it = Interface0DIterator(stroke)
        for svert in it:
            # the iterator doubles as the 0D input of the material functor
            t = get_material_value(self.func(it), self.attribute)
            thickness = self.value.min + self.evaluate(t) * self.value.delta
            self.blend_set_thickness(svert, thickness)
| # Calligraphic thickness modifier | # Calligraphic thickness modifier | ||||
| class CalligraphicThicknessShader(ThicknessBlenderMixIn, ScalarBlendModifier): | class CalligraphicThicknessShader(ThicknessBlenderMixIn, ScalarBlendModifier): | ||||
| """Thickness modifier for achieving a calligraphy-like effect """ | |||||
| def __init__(self, thickness_position, thickness_ratio, | def __init__(self, thickness_position, thickness_ratio, | ||||
| blend, influence, orientation, thickness_min, thickness_max): | blend_type, influence, orientation, thickness_min, thickness_max): | ||||
| ThicknessBlenderMixIn.__init__(self, thickness_position, thickness_ratio) | ThicknessBlenderMixIn.__init__(self, thickness_position, thickness_ratio) | ||||
| ScalarBlendModifier.__init__(self, blend, influence) | ScalarBlendModifier.__init__(self, blend_type, influence) | ||||
| self.__orientation = mathutils.Vector((math.cos(orientation), math.sin(orientation))) | self.orientation = Vector((cos(orientation), sin(orientation))) | ||||
| self.__thickness_min = thickness_min | self.thickness = Thickness(thickness_min, thickness_max, thickness_max - thickness_min) | ||||
| self.__thickness_max = thickness_max | self.func = VertexOrientation2DF0D() | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| func = VertexOrientation2DF0D() | it = Interface0DIterator(stroke) | ||||
| it = stroke.stroke_vertices_begin() | for svert in it: | ||||
| while not it.is_end: | dir = self.func(it) | ||||
| dir = func(Interface0DIterator(it)) | l = dir.length | ||||
| orthDir = mathutils.Vector((-dir.y, dir.x)) | if l != 0.0: | ||||
| orthDir.normalize() | # make the direction orthogonal and normalize (this is the fastest way) | ||||
| fac = abs(orthDir * self.__orientation) | # the Vector.orthogonal() method doesn't work on 2D vectors for some reason | ||||
kjym3Unsubmitted Not Done Inline ActionsWe might ask Campbell to improve Vector.orthogonal() to accept a 2D vector by reminding him that BLI_mathutils has ortho_v2_v2() in C. kjym3: We might ask Campbell to improve Vector.orthogonal() to accept a 2D vector by reminding him… | |||||
| sv = it.object | dir.x, dir.y = -dir.y / l, dir.x / l | ||||
kjym3Unsubmitted Not Done Inline ActionsIt looks like normalization is missing. kjym3: It looks like normalization is missing. | |||||
flokkievidsAuthorUnsubmitted Not Done Inline Actionsit isn't, the division by l (the Vector's length) takes care of that. flokkievids: it isn't, the division by l (the Vector's length) takes care of that. | |||||
kjym3Unsubmitted Not Done Inline ActionsYou are right. kjym3: You are right. | |||||
| a = sv.attribute.thickness | fac = abs(dir * self.orientation) | ||||
| b = self.__thickness_min + fac * (self.__thickness_max - self.__thickness_min) | #b = max(0.0, self.thickness.min + fac * self.thickness.delta) | ||||
| b = max(b, 0.0) | # above max call seems unnecessary (depends on input from user, but I think | ||||
| c = self.blend_thickness(a[0], a[1], b) | # it's safe to assume that (thickness.min > 0 and thickness.delta > 0) | ||||
Not Done Inline ActionsIt might worth for speed to compute the coefficient 1 / self.wavelength * 2 * pi outside the for loop. kjym3: It might worth for speed to compute the coefficient `1 / self.wavelength * 2 * pi` outside the… | |||||
Not Done Inline Actionsspeedwise the difference is minimal (0.03 seconds on 10K iterations). it seems that the main speed advantage comes from interning the value (cutting out the self.* lookup in the loop) but because the loop itself gets a little bit more readable this way as well I think it's good to incorporate this change. flokkievids: speedwise the difference is minimal (0.03 seconds on 10K iterations). it seems that the main… | |||||
| self.set_thickness(sv, c[0], c[1]) | b = self.thickness.min + fac * self.thickness.delta | ||||
| it.increment() | else: | ||||
| b = self.thickness.min | |||||
| self.blend_set_thickness(svert, b) | |||||
| # Geometry modifiers | # Geometry modifiers | ||||
| def iter_distance_along_stroke(stroke): | |||||
| distance = 0.0 | |||||
| it = stroke.stroke_vertices_begin() | |||||
| prev = it.object.point | |||||
| while not it.is_end: | |||||
| p = it.object.point | |||||
| distance += (prev - p).length | |||||
| prev = p.copy() # need a copy because the point can be altered | |||||
| yield it, distance | |||||
| it.increment() | |||||
| class SinusDisplacementShader(StrokeShader): | class SinusDisplacementShader(StrokeShader): | ||||
| """Displaces the stroke in a sinewave-like shape """ | |||||
| def __init__(self, wavelength, amplitude, phase): | def __init__(self, wavelength, amplitude, phase): | ||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| self._wavelength = wavelength | self._wavelength = wavelength | ||||
| self._amplitude = amplitude | self._amplitude = amplitude | ||||
| self._phase = phase / wavelength * 2 * math.pi | self._phase = phase / wavelength * 2 * pi | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| # separately iterate over stroke vertices to compute normals | # to get reliable results, the normals have to be stored (need to investigate why) | ||||
kjym3Unsubmitted Not Done Inline ActionsWe need to store normals because we modify vertex locations which in turn alter normals. kjym3: We need to store normals because we modify vertex locations which in turn alter normals. | |||||
| buf = [] | normals = tuple(stroke_normal(stroke)) | ||||
| for it, distance in iter_distance_along_stroke(stroke): | distances = iter_distance_along_stroke(stroke) | ||||
| buf.append((it.object, distance, stroke_normal(it))) | for svert, distance, normal in zip(stroke, distances, normals): | ||||
| # iterate over the vertices again to displace them | n = normal * self._amplitude * cos(distance / self._wavelength * 2 * pi + self._phase) | ||||
| for v, distance, normal in buf: | svert.point += n | ||||
| n = normal * self._amplitude * math.cos(distance / self._wavelength * 2 * math.pi + self._phase) | |||||
| v.point = v.point + n | |||||
| stroke.update_length() | stroke.update_length() | ||||
| class PerlinNoise1DShader(StrokeShader): | class PerlinNoise1DShader(StrokeShader): | ||||
| def __init__(self, freq=10, amp=10, oct=4, angle=math.radians(45), seed=-1): | """ | ||||
| Displaces the stroke using the curvilinear abscissa. This means | |||||
| that lines with the same length and sampling interval will be | |||||
| identically distorded | |||||
| """ | |||||
| def __init__(self, freq=10, amp=10, oct=4, angle=radians(45), seed=-1): | |||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| self.__noise = Noise(seed) | self.noise = Noise(seed) | ||||
| self.__freq = freq | self.freq = freq | ||||
| self.__amp = amp | self.amp = amp | ||||
| self.__oct = oct | self.oct = oct | ||||
| self.__dir = mathutils.Vector((math.cos(angle), math.sin(angle))) | self.dir = Vector((cos(angle), sin(angle))) | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| length = stroke.length_2d | length = stroke.length_2d | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | nres = self.noise.turbulence1(length * svert.u, self.freq, self.amp, self.oct) | ||||
| v = it.object | svert.point += nres * self.dir | ||||
| nres = self.__noise.turbulence1(length * v.u, self.__freq, self.__amp, self.__oct) | |||||
| v.point = v.point + nres * self.__dir | |||||
| it.increment() | |||||
| stroke.update_length() | stroke.update_length() | ||||
| class PerlinNoise2DShader(StrokeShader): | class PerlinNoise2DShader(StrokeShader): | ||||
| def __init__(self, freq=10, amp=10, oct=4, angle=math.radians(45), seed=-1): | """ | ||||
| Displaces the stroke using the strokes coordinates. This means | |||||
| that in a scene no strokes will be distorded identically | |||||
| More information on the noise shaders can be found at | |||||
| freestyleintegration.wordpress.com/2011/09/25/development-updates-on-september-25/ | |||||
| """ | |||||
| def __init__(self, freq=10, amp=10, oct=4, angle=radians(45), seed=-1): | |||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| self.__noise = Noise(seed) | self.noise = Noise(seed) | ||||
| self.__freq = freq | self.freq = freq | ||||
| self.__amp = amp | self.amp = amp | ||||
| self.__oct = oct | self.oct = oct | ||||
| self.__dir = mathutils.Vector((math.cos(angle), math.sin(angle))) | self.dir = Vector((cos(angle), sin(angle))) | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| it = stroke.stroke_vertices_begin() | for svert in stroke: | ||||
| while not it.is_end: | projected = Vector((svert.projected_x, svert.projected_y)) | ||||
| v = it.object | nres = self.noise.turbulence2(projected, self.freq, self.amp, self.oct) | ||||
| vec = mathutils.Vector((v.projected_x, v.projected_y)) | svert.point += nres * self.dir | ||||
| nres = self.__noise.turbulence2(vec, self.__freq, self.__amp, self.__oct) | |||||
| v.point = v.point + nres * self.__dir | |||||
| it.increment() | |||||
| stroke.update_length() | stroke.update_length() | ||||
| class Offset2DShader(StrokeShader): | class Offset2DShader(StrokeShader): | ||||
| """Offsets the stroke by a given amount """ | |||||
| def __init__(self, start, end, x, y): | def __init__(self, start, end, x, y): | ||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| self.__start = start | self.start = start | ||||
| self.__end = end | self.end = end | ||||
| self.__xy = mathutils.Vector((x, y)) | self.xy = Vector((x, y)) | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| # first iterate over stroke vertices to compute normals | for svert, n in zip(stroke, tuple(stroke_normal(stroke))): | ||||
kjym3Unsubmitted Not Done Inline ActionsI would add a line like normals = tuple(stroke_normal(stroke)) before the for loop and leave comments highlighting the need of storing copies of normals. kjym3: I would add a line like `normals = tuple(stroke_normal(stroke))` before the `for` loop and… | |||||
| buf = [] | a = self.start + svert.u * (self.end - self.start) | ||||
| it = stroke.stroke_vertices_begin() | svert.point += (n * a) + self.xy | ||||
| while not it.is_end: | |||||
| buf.append((it.object, stroke_normal(it))) | |||||
| it.increment() | |||||
| # again iterate over the vertices to add displacement | |||||
| for v, n in buf: | |||||
| a = self.__start + v.u * (self.__end - self.__start) | |||||
| n = n * a | |||||
| v.point = v.point + n + self.__xy | |||||
| stroke.update_length() | stroke.update_length() | ||||
| class Transform2DShader(StrokeShader): | class Transform2DShader(StrokeShader): | ||||
| """Transforms the stroke (scale, rotation, location) around a given pivot point """ | |||||
| def __init__(self, pivot, scale_x, scale_y, angle, pivot_u, pivot_x, pivot_y): | def __init__(self, pivot, scale_x, scale_y, angle, pivot_u, pivot_x, pivot_y): | ||||
| StrokeShader.__init__(self) | StrokeShader.__init__(self) | ||||
| self.__pivot = pivot | self.pivot = pivot | ||||
| self.__scale_x = scale_x | self.scale = Vector((scale_x, scale_y)) | ||||
| self.__scale_y = scale_y | self.cos_theta = cos(angle) | ||||
| self.__angle = angle | self.sin_theta = sin(angle) | ||||
| self.__pivot_u = pivot_u | self.pivot_u = pivot_u | ||||
| self.__pivot_x = pivot_x | self.pivot_x = pivot_x | ||||
| self.__pivot_y = pivot_y | self.pivot_y = pivot_y | ||||
| if pivot not in {'START', 'END', 'CENTER', 'ABSOLUTE', 'PARAM'}: | |||||
| raise ValueError("expected pivot in {'START', 'END', 'CENTER', 'ABSOLUTE', 'PARAM'}, not" + pivot) | |||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| # determine the pivot of scaling and rotation operations | # determine the pivot of scaling and rotation operations | ||||
| if self.__pivot == 'START': | if self.pivot == 'START': | ||||
| it = stroke.stroke_vertices_begin() | pivot = stroke[0].point | ||||
| pivot = it.object.point | elif self.pivot == 'END': | ||||
| elif self.__pivot == 'END': | pivot = stroke[-1].point | ||||
| it = stroke.stroke_vertices_end() | elif self.pivot == 'CENTER': | ||||
| it.decrement() | pivot = (1 / len(stroke)) * sum((svert.point for svert in stroke), Vector((0.0, 0.0))) | ||||
| pivot = it.object.point | elif self.pivot == 'ABSOLUTE': | ||||
| elif self.__pivot == 'PARAM': | pivot = Vector((self.pivot_x, self.pivot_y)) | ||||
| p = None | elif self.pivot == 'PARAM': | ||||
| it = stroke.stroke_vertices_begin() | if self.pivot_u < stroke[0].u: | ||||
| while not it.is_end: | pivot = stroke[0].point | ||||
| prev = p | |||||
| v = it.object | |||||
| p = v.point | |||||
| u = v.u | |||||
| if self.__pivot_u < u: | |||||
| break | |||||
| it.increment() | |||||
| if prev is None: | |||||
| pivot = p | |||||
| else: | else: | ||||
| delta = u - self.__pivot_u | for prev, svert in pairwise(stroke): | ||||
| pivot = p + delta * (prev - p) | if self.pivot_u < svert.u: | ||||
| elif self.__pivot == 'CENTER': | break | ||||
| pivot = mathutils.Vector((0.0, 0.0)) | pivot = svert.point + (svert.u - self.pivot_u) * (prev.point - svert.point) | ||||
| n = 0 | |||||
| it = stroke.stroke_vertices_begin() | |||||
| while not it.is_end: | |||||
| p = it.object.point | |||||
| pivot = pivot + p | |||||
| n += 1 | |||||
| it.increment() | |||||
| pivot.x = pivot.x / n | |||||
| pivot.y = pivot.y / n | |||||
| elif self.__pivot == 'ABSOLUTE': | |||||
| pivot = mathutils.Vector((self.__pivot_x, self.__pivot_y)) | |||||
| # apply scaling and rotation operations | # apply scaling and rotation operations | ||||
| cos_theta = math.cos(self.__angle) | for svert in stroke: | ||||
| sin_theta = math.sin(self.__angle) | p = (svert.point - pivot) | ||||
| it = stroke.stroke_vertices_begin() | x = p.x * self.scale.x | ||||
| while not it.is_end: | y = p.y * self.scale.y | ||||
| v = it.object | p.x = x * self.cos_theta - y * self.sin_theta | ||||
| p = v.point | p.y = x * self.sin_theta + y * self.cos_theta | ||||
| p = p - pivot | svert.point = p + pivot | ||||
| x = p.x * self.__scale_x | |||||
| y = p.y * self.__scale_y | |||||
| p.x = x * cos_theta - y * sin_theta | |||||
| p.y = x * sin_theta + y * cos_theta | |||||
| v.point = p + pivot | |||||
| it.increment() | |||||
| stroke.update_length() | stroke.update_length() | ||||
| Context not available. | |||||
| class QuantitativeInvisibilityRangeUP1D(UnaryPredicate1D): | class QuantitativeInvisibilityRangeUP1D(UnaryPredicate1D): | ||||
| def __init__(self, qi_start, qi_end): | def __init__(self, qi_start, qi_end): | ||||
| UnaryPredicate1D.__init__(self) | UnaryPredicate1D.__init__(self) | ||||
| self.__getQI = QuantitativeInvisibilityF1D() | self.getQI = QuantitativeInvisibilityF1D() | ||||
| self.__qi_start = qi_start | self.qi_start = qi_start | ||||
| self.__qi_end = qi_end | self.qi_end = qi_end | ||||
| def __call__(self, inter): | def __call__(self, inter): | ||||
| qi = self.__getQI(inter) | qi = self.getQI(inter) | ||||
| return self.__qi_start <= qi <= self.__qi_end | return self.qi_start <= qi <= self.qi_end | ||||
| def join_unary_predicates(upred_list, bpred): | |||||
| if not upred_list: | |||||
| return None | |||||
| upred = upred_list[0] | |||||
| for p in upred_list[1:]: | |||||
| upred = bpred(upred, p) | |||||
| return upred | |||||
| class ObjectNamesUP1D(UnaryPredicate1D): | class ObjectNamesUP1D(UnaryPredicate1D): | ||||
| Context not available. | |||||
| return found | return found | ||||
| # Stroke caps | # -- Split by dashed line pattern -- # | ||||
| def iter_stroke_vertices(stroke): | |||||
| it = stroke.stroke_vertices_begin() | |||||
| prev_p = None | |||||
| while not it.is_end: | |||||
| sv = it.object | |||||
| p = sv.point | |||||
| if prev_p is None or (prev_p - p).length > 1e-6: | |||||
| yield sv | |||||
| prev_p = p.copy() | |||||
| it.increment() | |||||
| class RoundCapShader(StrokeShader): | |||||
| def round_cap_thickness(self, x): | |||||
| x = max(0.0, min(x, 1.0)) | |||||
| return math.sqrt(1.0 - (x ** 2.0)) | |||||
| def shade(self, stroke): | |||||
| # save the location and attribute of stroke vertices | |||||
| buffer = [] | |||||
| for sv in iter_stroke_vertices(stroke): | |||||
| buffer.append((mathutils.Vector(sv.point), StrokeAttribute(sv.attribute))) | |||||
| nverts = len(buffer) | |||||
| if nverts < 2: | |||||
| return | |||||
| # calculate the number of additional vertices to form caps | |||||
| R, L = stroke[0].attribute.thickness | |||||
| caplen_beg = (R + L) / 2.0 | |||||
| nverts_beg = max(5, int(R + L)) | |||||
| R, L = stroke[-1].attribute.thickness | |||||
| caplen_end = (R + L) / 2.0 | |||||
| nverts_end = max(5, int(R + L)) | |||||
| # adjust the total number of stroke vertices | |||||
| stroke.resample(nverts + nverts_beg + nverts_end) | |||||
| # restore the location and attribute of the original vertices | |||||
| for i in range(nverts): | |||||
| p, attr = buffer[i] | |||||
| stroke[nverts_beg + i].point = p | |||||
| stroke[nverts_beg + i].attribute = attr | |||||
| # reshape the cap at the beginning of the stroke | |||||
| q, attr = buffer[1] | |||||
| p, attr = buffer[0] | |||||
| d = p - q | |||||
| d = d / d.length * caplen_beg | |||||
| n = 1.0 / nverts_beg | |||||
| R, L = attr.thickness | |||||
| for i in range(nverts_beg): | |||||
| t = (nverts_beg - i) * n | |||||
| stroke[i].point = p + d * t | |||||
| r = self.round_cap_thickness((nverts_beg - i + 1) * n) | |||||
| stroke[i].attribute = attr | |||||
| stroke[i].attribute.thickness = (R * r, L * r) | |||||
| # reshape the cap at the end of the stroke | |||||
| q, attr = buffer[-2] | |||||
| p, attr = buffer[-1] | |||||
| d = p - q | |||||
| d = d / d.length * caplen_end | |||||
| n = 1.0 / nverts_end | |||||
| R, L = attr.thickness | |||||
| for i in range(nverts_end): | |||||
| t = (nverts_end - i) * n | |||||
| stroke[-i - 1].point = p + d * t | |||||
| r = self.round_cap_thickness((nverts_end - i + 1) * n) | |||||
| stroke[-i - 1].attribute = attr | |||||
| stroke[-i - 1].attribute.thickness = (R * r, L * r) | |||||
| # update the curvilinear 2D length of each vertex | |||||
| stroke.update_length() | |||||
| class SquareCapShader(StrokeShader): | |||||
| def shade(self, stroke): | |||||
| # save the location and attribute of stroke vertices | |||||
| buffer = [] | |||||
| for sv in iter_stroke_vertices(stroke): | |||||
| buffer.append((mathutils.Vector(sv.point), StrokeAttribute(sv.attribute))) | |||||
| nverts = len(buffer) | |||||
| if nverts < 2: | |||||
| return | |||||
| # calculate the number of additional vertices to form caps | |||||
| R, L = stroke[0].attribute.thickness | |||||
| caplen_beg = (R + L) / 2.0 | |||||
| nverts_beg = 1 | |||||
| R, L = stroke[-1].attribute.thickness | |||||
| caplen_end = (R + L) / 2.0 | |||||
| nverts_end = 1 | |||||
| # adjust the total number of stroke vertices | |||||
| stroke.resample(nverts + nverts_beg + nverts_end) | |||||
| # restore the location and attribute of the original vertices | |||||
| for i in range(nverts): | |||||
| p, attr = buffer[i] | |||||
| stroke[nverts_beg + i].point = p | |||||
| stroke[nverts_beg + i].attribute = attr | |||||
| # reshape the cap at the beginning of the stroke | |||||
| q, attr = buffer[1] | |||||
| p, attr = buffer[0] | |||||
| d = p - q | |||||
| stroke[0].point = p + d / d.length * caplen_beg | |||||
| stroke[0].attribute = attr | |||||
| # reshape the cap at the end of the stroke | |||||
| q, attr = buffer[-2] | |||||
| p, attr = buffer[-1] | |||||
| d = p - q | |||||
| stroke[-1].point = p + d / d.length * caplen_beg | |||||
| stroke[-1].attribute = attr | |||||
| # update the curvilinear 2D length of each vertex | |||||
| stroke.update_length() | |||||
| # Split by dashed line pattern | |||||
| class SplitPatternStartingUP0D(UnaryPredicate0D): | class SplitPatternStartingUP0D(UnaryPredicate0D): | ||||
| def __init__(self, controller): | def __init__(self, controller): | ||||
| Context not available. | |||||
| self.sampling = float(sampling) | self.sampling = float(sampling) | ||||
| k = len(pattern) // 2 | k = len(pattern) // 2 | ||||
| n = k * 2 | n = k * 2 | ||||
| self.start_pos = [pattern[i] + pattern[i + 1] for i in range(0, n, 2)] | # Note: need to verify these are correct | ||||
| self.stop_pos = [pattern[i] for i in range(0, n, 2)] | #self.start_pos = [pattern[i] + pattern[i + 1] for i in range(0, n, 2)] | ||||
| #self.stop_pos = [pattern[i] for i in range(0, n, 2)] | |||||
| self.start_pos = [prev + current for prev, current in pairwise(pattern)] | |||||
| self.stop_pos = [prev for prev, _ in pairwise(pattern)] | |||||
kjym3Unsubmitted Not Done Inline ActionsThe original expressions look easier to read and still I don't expect performance loss with them. kjym3: The original expressions look easier to read and still I don't expect performance loss with… | |||||
flokkievidsAuthorUnsubmitted Not Done Inline Actionsuse of indices and range in this manner is generally discouraged and considered unpythonic. the problem here is that pairwise doesn't consider the step value. I'll look into it further (maybe itertools has something for this) flokkievids: use of indices and range in this manner is generally discouraged and considered unpythonic. | |||||
| self.init() | self.init() | ||||
| def init(self): | def init(self): | ||||
| Context not available. | |||||
| self._pattern = pattern | self._pattern = pattern | ||||
| def shade(self, stroke): | def shade(self, stroke): | ||||
| index = 0 # pattern index | |||||
| start = 0.0 # 2D curvilinear length | start = 0.0 # 2D curvilinear length | ||||
| visible = True | visible = True | ||||
| """ The extra 'sampling' term is added below, because the | |||||
| visibility attribute of the i-th vertex refers to the | |||||
| visibility of the stroke segment between the i-th and | |||||
| (i+1)-th vertices. """ | |||||
| sampling = 1.0 | sampling = 1.0 | ||||
| it = stroke.stroke_vertices_begin(sampling) | it = stroke.stroke_vertices_begin(sampling) | ||||
| while not it.is_end: | for svert, pattern in zip(it, cycle(self._pattern)): | ||||
kjym3Unsubmitted Not Done Inline ActionsThis for loop does not seem correct. Please double-check visual results. The use of cycle() may still be relevant, e.g.: pattern_cycle = cycle(self._pattern)
pattern = next(pattern_cycle)
visible = True
for svert in it:
pos = it.t # curvilinear abscissa
if pos - start + sampling > pattern:
start = pos
pattern = next(pattern_cycle)
visible = not visiblekjym3: This `for` loop does not seem correct. Please double-check visual results.
The use of `cycle… | |||||
flokkievidsAuthorUnsubmitted Not Done Inline ActionsYou're right. flokkievids: You're right. | |||||
| pos = it.t # curvilinear abscissa | pos = it.t # curvilinear abscissa | ||||
| # The extra 'sampling' term is added below, because the | |||||
| # visibility attribute of the i-th vertex refers to the | if pos - start + sampling > pattern: | ||||
| # visibility of the stroke segment between the i-th and | |||||
| # (i+1)-th vertices. | |||||
| if pos - start + sampling > self._pattern[index]: | |||||
| start = pos | start = pos | ||||
| index += 1 | |||||
| if index == len(self._pattern): | |||||
| index = 0 | |||||
| visible = not visible | visible = not visible | ||||
| if not visible: | if not visible: | ||||
| it.object.attribute.visible = visible | it.object.attribute.visible = False | ||||
| it.increment() | |||||
| # predicates for chaining | # predicates for chaining | ||||
| Context not available. | |||||
| if denom < 1e-6: | if denom < 1e-6: | ||||
| return False | return False | ||||
| x = (dir1 * dir2) / denom | x = (dir1 * dir2) / denom | ||||
| return math.acos(min(max(x, -1.0), 1.0)) > self._angle | return acos(bound(-1.0, x, 1.0)) > self._angle | ||||
| class AndBP1D(BinaryPredicate1D): | |||||
| def __init__(self, pred1, pred2): | |||||
| BinaryPredicate1D.__init__(self) | |||||
| self.__pred1 = pred1 | |||||
| self.__pred2 = pred2 | |||||
| def __call__(self, i1, i2): | |||||
| return self.__pred1(i1, i2) and self.__pred2(i1, i2) | |||||
| # predicates for selection | # predicates for selection | ||||
| class LengthThresholdUP1D(UnaryPredicate1D): | class LengthThresholdUP1D(UnaryPredicate1D): | ||||
| def __init__(self, length_min=None, length_max=None): | def __init__(self, length_min=None, length_max=None): | ||||
| UnaryPredicate1D.__init__(self) | UnaryPredicate1D.__init__(self) | ||||
| Context not available. | |||||
| class FaceMarkBothUP1D(UnaryPredicate1D): | class FaceMarkBothUP1D(UnaryPredicate1D): | ||||
| def __call__(self, inter): # ViewEdge | def __call__(self, inter: ViewEdge): | ||||
| fe = inter.first_fedge | |||||
| while fe is not None: | while fe is not None: | ||||
| if fe.is_smooth: | if fe.is_smooth: | ||||
| if fe.face_mark: | if fe.face_mark: | ||||
| Context not available. | |||||
| class FaceMarkOneUP1D(UnaryPredicate1D): | class FaceMarkOneUP1D(UnaryPredicate1D): | ||||
| def __call__(self, inter): # ViewEdge | def __call__(self, inter: ViewEdge): | ||||
| fe = inter.first_fedge | fe = inter.first_fedge | ||||
| while fe is not None: | while fe is not None: | ||||
| if fe.is_smooth: | if fe.is_smooth: | ||||
| Context not available. | |||||
| class MaterialBoundaryUP0D(UnaryPredicate0D): | class MaterialBoundaryUP0D(UnaryPredicate0D): | ||||
| def __call__(self, it): | def __call__(self, it): | ||||
| if it.is_begin: | try: | ||||
| return False | it.decrement() | ||||
| it_prev = Interface0DIterator(it) | prev = it.object | ||||
| it_prev.decrement() | svert = next(it) | ||||
| v = it.object | succ = next(it) | ||||
| it.increment() | except (RuntimeError, StopIteration) as e: | ||||
kjym3Unsubmitted Not Done Inline ActionsUsing .is_begin and .is_end would better document boundary conditions. Please consider revising the code here. kjym3: Using `.is_begin` and `.is_end` would better document boundary conditions. Please consider… | |||||
| if it.is_end: | # iterator at start or begin | ||||
| return False | return False | ||||
| fe = v.get_fedge(it_prev.object) | |||||
| fe = svert.get_fedge(prev) | |||||
| idx1 = fe.material_index if fe.is_smooth else fe.material_index_left | idx1 = fe.material_index if fe.is_smooth else fe.material_index_left | ||||
| fe = v.get_fedge(it.object) | fe = svert.get_fedge(succ) | ||||
| idx2 = fe.material_index if fe.is_smooth else fe.material_index_left | idx2 = fe.material_index if fe.is_smooth else fe.material_index_left | ||||
| return idx1 != idx2 | return idx1 != idx2 | ||||
| Context not available. | |||||
| self._func = Curvature2DAngleF0D() | self._func = Curvature2DAngleF0D() | ||||
| def __call__(self, inter): | def __call__(self, inter): | ||||
| angle = math.pi - self._func(inter) | angle = pi - self._func(inter) | ||||
| if self._angle_min is not None and angle < self._angle_min: | if self._angle_min is not None and angle < self._angle_min: | ||||
| return True | return True | ||||
| if self._angle_max is not None and angle > self._angle_max: | if self._angle_max is not None and angle > self._angle_max: | ||||
| Context not available. | |||||
| upred = ExternalContourUP1D() | upred = ExternalContourUP1D() | ||||
| edge_type_criteria.append(NotUP1D(upred) if lineset.exclude_external_contour else upred) | edge_type_criteria.append(NotUP1D(upred) if lineset.exclude_external_contour else upred) | ||||
| if lineset.edge_type_combination == 'OR': | if lineset.edge_type_combination == 'OR': | ||||
| upred = join_unary_predicates(edge_type_criteria, OrUP1D) | upred = OrUP1D(*edge_type_criteria) | ||||
| else: | else: | ||||
| upred = join_unary_predicates(edge_type_criteria, AndUP1D) | upred = AndUP1D(*edge_type_criteria) | ||||
| if upred is not None: | if upred is not None: | ||||
| if lineset.edge_type_negation == 'EXCLUSIVE': | if lineset.edge_type_negation == 'EXCLUSIVE': | ||||
| upred = NotUP1D(upred) | upred = NotUP1D(upred) | ||||
| Context not available. | |||||
| upred = FaceMarkBothUP1D() | upred = FaceMarkBothUP1D() | ||||
| else: | else: | ||||
| upred = FaceMarkOneUP1D() | upred = FaceMarkOneUP1D() | ||||
| if lineset.face_mark_negation == 'EXCLUSIVE': | if lineset.face_mark_negation == 'EXCLUSIVE': | ||||
| upred = NotUP1D(upred) | upred = NotUP1D(upred) | ||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # prepare selection criteria by group of objects | # prepare selection criteria by group of objects | ||||
| if lineset.select_by_group: | if lineset.select_by_group: | ||||
| if lineset.group is not None: | if lineset.group is not None: | ||||
| names = dict((ob.name, True) for ob in lineset.group.objects) | names = {ob.name: True for ob in lineset.group.objects} | ||||
| upred = ObjectNamesUP1D(names, lineset.group_negation == 'EXCLUSIVE') | upred = ObjectNamesUP1D(names, lineset.group_negation == 'EXCLUSIVE') | ||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # prepare selection criteria by image border | # prepare selection criteria by image border | ||||
| if lineset.select_by_image_border: | if lineset.select_by_image_border: | ||||
| xmin, ymin, xmax, ymax = ContextFunctions.get_border() | upred = WithinImageBoundaryUP1D(*ContextFunctions.get_border()) | ||||
| upred = WithinImageBoundaryUP1D(xmin, ymin, xmax, ymax) | |||||
| selection_criteria.append(upred) | selection_criteria.append(upred) | ||||
| # select feature edges | # select feature edges | ||||
| upred = join_unary_predicates(selection_criteria, AndUP1D) | upred = AndUP1D(*selection_criteria) | ||||
| if upred is None: | if upred is None: | ||||
| upred = TrueUP1D() | upred = TrueUP1D() | ||||
| Operators.select(upred) | Operators.select(upred) | ||||
| Context not available. | |||||
| elif m.type == '2D_TRANSFORM': | elif m.type == '2D_TRANSFORM': | ||||
| shaders_list.append(Transform2DShader( | shaders_list.append(Transform2DShader( | ||||
| m.pivot, m.scale_x, m.scale_y, m.angle, m.pivot_u, m.pivot_x, m.pivot_y)) | m.pivot, m.scale_x, m.scale_y, m.angle, m.pivot_u, m.pivot_x, m.pivot_y)) | ||||
| if linestyle.use_texture: | |||||
| has_tex = False | |||||
| for slot in linestyle.texture_slots: | |||||
| if slot is not None: | |||||
| shaders_list.append(BlenderTextureShader(slot)) | |||||
| has_tex = True | |||||
| if has_tex: | |||||
| shaders_list.append(StrokeTextureStepShader(linestyle.texture_spacing)) | |||||
| color = linestyle.color | |||||
| if (not linestyle.use_chaining) or (linestyle.chaining == 'PLAIN' and linestyle.use_same_object): | if (not linestyle.use_chaining) or (linestyle.chaining == 'PLAIN' and linestyle.use_same_object): | ||||
| thickness_position = linestyle.thickness_position | thickness_position = linestyle.thickness_position | ||||
| else: | else: | ||||
| Context not available. | |||||
| if bpy.app.debug_freestyle: | if bpy.app.debug_freestyle: | ||||
| print("Warning: Thickness position options are applied when chaining is disabled\n" | print("Warning: Thickness position options are applied when chaining is disabled\n" | ||||
| " or the Plain chaining is used with the Same Object option enabled.") | " or the Plain chaining is used with the Same Object option enabled.") | ||||
| shaders_list.append(BaseColorShader(color.r, color.g, color.b, linestyle.alpha)) | |||||
| shaders_list.append(ConstantColorShader(*(linestyle.color), alpha=linestyle.alpha)) | |||||
| shaders_list.append(BaseThicknessShader(linestyle.thickness, thickness_position, | shaders_list.append(BaseThicknessShader(linestyle.thickness, thickness_position, | ||||
| linestyle.thickness_ratio)) | linestyle.thickness_ratio)) | ||||
| # -- Modifiers and textures -- # | |||||
| for m in linestyle.color_modifiers: | for m in linestyle.color_modifiers: | ||||
| if not m.use: | if not m.use: | ||||
| continue | continue | ||||
| Context not available. | |||||
| shaders_list.append(ColorDistanceFromCameraShader( | shaders_list.append(ColorDistanceFromCameraShader( | ||||
| m.blend, m.influence, m.color_ramp, | m.blend, m.influence, m.color_ramp, | ||||
| m.range_min, m.range_max)) | m.range_min, m.range_max)) | ||||
| elif m.type == 'DISTANCE_FROM_OBJECT': | elif m.type == 'DISTANCE_FROM_OBJECT' and m.target is not None: | ||||
| shaders_list.append(ColorDistanceFromObjectShader( | shaders_list.append(ColorDistanceFromObjectShader( | ||||
| m.blend, m.influence, m.color_ramp, m.target, | m.blend, m.influence, m.color_ramp, m.target, | ||||
| m.range_min, m.range_max)) | m.range_min, m.range_max)) | ||||
| Context not available. | |||||
| shaders_list.append(AlphaDistanceFromCameraShader( | shaders_list.append(AlphaDistanceFromCameraShader( | ||||
| m.blend, m.influence, m.mapping, m.invert, m.curve, | m.blend, m.influence, m.mapping, m.invert, m.curve, | ||||
| m.range_min, m.range_max)) | m.range_min, m.range_max)) | ||||
| elif m.type == 'DISTANCE_FROM_OBJECT': | elif m.type == 'DISTANCE_FROM_OBJECT' and m.target is not None: | ||||
| shaders_list.append(AlphaDistanceFromObjectShader( | shaders_list.append(AlphaDistanceFromObjectShader( | ||||
| m.blend, m.influence, m.mapping, m.invert, m.curve, m.target, | m.blend, m.influence, m.mapping, m.invert, m.curve, m.target, | ||||
| m.range_min, m.range_max)) | m.range_min, m.range_max)) | ||||
| Context not available. | |||||
| thickness_position, linestyle.thickness_ratio, | thickness_position, linestyle.thickness_ratio, | ||||
| m.blend, m.influence, m.mapping, m.invert, m.curve, | m.blend, m.influence, m.mapping, m.invert, m.curve, | ||||
| m.range_min, m.range_max, m.value_min, m.value_max)) | m.range_min, m.range_max, m.value_min, m.value_max)) | ||||
| elif m.type == 'DISTANCE_FROM_OBJECT': | elif m.type == 'DISTANCE_FROM_OBJECT' and m.target is not None: | ||||
| shaders_list.append(ThicknessDistanceFromObjectShader( | shaders_list.append(ThicknessDistanceFromObjectShader( | ||||
| thickness_position, linestyle.thickness_ratio, | thickness_position, linestyle.thickness_ratio, | ||||
| m.blend, m.influence, m.mapping, m.invert, m.curve, m.target, | m.blend, m.influence, m.mapping, m.invert, m.curve, m.target, | ||||
| Context not available. | |||||
| thickness_position, linestyle.thickness_ratio, | thickness_position, linestyle.thickness_ratio, | ||||
| m.blend, m.influence, | m.blend, m.influence, | ||||
| m.orientation, m.thickness_min, m.thickness_max)) | m.orientation, m.thickness_min, m.thickness_max)) | ||||
| if linestyle.use_texture: | |||||
| textures = tuple(BlenderTextureShader(slot) for slot in linestyle.texture_slots if slot is not None) | |||||
| if textures: | |||||
| shaders_list.extend(textures) | |||||
| shaders_list.append(StrokeTextureStepShader(linestyle.texture_spacing)) | |||||
| # -- Stroke caps -- # | |||||
| if linestyle.caps == 'ROUND': | if linestyle.caps == 'ROUND': | ||||
| shaders_list.append(RoundCapShader()) | shaders_list.append(RoundCapShader()) | ||||
| elif linestyle.caps == 'SQUARE': | elif linestyle.caps == 'SQUARE': | ||||
| shaders_list.append(SquareCapShader()) | shaders_list.append(SquareCapShader()) | ||||
| # -- Dashed line -- # | |||||
| if linestyle.use_dashed_line: | if linestyle.use_dashed_line: | ||||
| pattern = [] | pattern = [] | ||||
| if linestyle.dash1 > 0 and linestyle.gap1 > 0: | if linestyle.dash1 > 0 and linestyle.gap1 > 0: | ||||
| Context not available. | |||||
We could unify these named tuples under a single, more general name, e.g. 'Bound' or 'Limit'.