intern/cycles/render/camera.cpp
@@ NODE_DEFINE(Camera) @@ (first 90 lines unchanged, not shown)
   SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());
   SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);
   static NodeEnum type_enum;
   type_enum.insert("perspective", CAMERA_PERSPECTIVE);
   type_enum.insert("orthograph", CAMERA_ORTHOGRAPHIC);
   type_enum.insert("panorama", CAMERA_PANORAMA);
-  SOCKET_ENUM(type, "Type", type_enum, CAMERA_PERSPECTIVE);
+  SOCKET_ENUM(camera_type, "Type", type_enum, CAMERA_PERSPECTIVE);
   static NodeEnum panorama_type_enum;
   panorama_type_enum.insert("equirectangular", PANORAMA_EQUIRECTANGULAR);
   panorama_type_enum.insert("mirrorball", PANORAMA_MIRRORBALL);
   panorama_type_enum.insert("fisheye_equidistant", PANORAMA_FISHEYE_EQUIDISTANT);
   panorama_type_enum.insert("fisheye_equisolid", PANORAMA_FISHEYE_EQUISOLID);
   SOCKET_ENUM(panorama_type, "Panorama Type", panorama_type_enum, PANORAMA_EQUIRECTANGULAR);
@@ NODE_DEFINE(Camera) @@ (33 unchanged lines not shown)
   SOCKET_FLOAT(viewplane.bottom, "Viewplane Bottom", 0);
   SOCKET_FLOAT(viewplane.top, "Viewplane Top", 0);
   SOCKET_FLOAT(border.left, "Border Left", 0);
   SOCKET_FLOAT(border.right, "Border Right", 0);
   SOCKET_FLOAT(border.bottom, "Border Bottom", 0);
   SOCKET_FLOAT(border.top, "Border Top", 0);
+  SOCKET_FLOAT(viewport_camera_border.left, "Viewport Border Left", 0);
+  SOCKET_FLOAT(viewport_camera_border.right, "Viewport Border Right", 0);
+  SOCKET_FLOAT(viewport_camera_border.bottom, "Viewport Border Bottom", 0);
+  SOCKET_FLOAT(viewport_camera_border.top, "Viewport Border Top", 0);
   SOCKET_FLOAT(offscreen_dicing_scale, "Offscreen Dicing Scale", 1.0f);
+  SOCKET_INT(full_width, "Full Width", 1024);
+  SOCKET_INT(full_height, "Full Height", 512);
+  SOCKET_INT(resolution, "Resolution", 1);
+  SOCKET_BOOLEAN(use_perspective_motion, "Use Perspective Motion", false);
   return type;
 }
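
Note: the socket is renamed from "type" to "camera_type" because the identifier "type" already refers to the NodeType being defined here (see "return type;" just above), so keeping it as a member name invites shadowing in the Node code. A minimal caller-side sketch of what the rename implies for host code; the generated set_* accessors are an assumption about the socket API this refactor belongs to and are not visible in this file:

  /* Hypothetical sketch, not part of this changeset. */
  Camera *cam = scene->camera;
  cam->set_camera_type(CAMERA_PANORAMA); /* was: cam->type = CAMERA_PANORAMA; */
  cam->set_full_width(1920);             /* full_width/full_height are now sockets too */
  cam->set_full_height(1080);
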
 Camera::Camera() : Node(node_type)
 {
   shutter_table_offset = TABLE_OFFSET_INVALID;
   width = 1024;
@@ Camera::Camera() : Node(node_type) @@ (16 unchanged lines not shown)
   cameratoworld = transform_identity();
   worldtoraster = projection_identity();
   full_rastertocamera = projection_identity();
   dx = make_float3(0.0f, 0.0f, 0.0f);
   dy = make_float3(0.0f, 0.0f, 0.0f);
-  need_update = true;
   need_device_update = true;
   need_flags_update = true;
   previous_need_motion = -1;
   memset((void *)&kernel_camera, 0, sizeof(kernel_camera));
 }
 Camera::~Camera()
 {
 }
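
Note: the constructor no longer sets an explicit need_update flag; the dirty state now lives in the Node-level socket_modified mask (see tag_update() further down). A hedged sketch of the intended lifecycle, assuming a freshly constructed node starts out fully modified (that behaviour is on the Node side and is not shown in this file):

  Camera cam;                  /* new node, assumed to report is_modified() == true */
  /* ... sync socket values ... */
  cam.update(scene);           /* recomputes transforms, ends with clear_modified() */
  assert(!cam.is_modified());  /* stays clean until a socket changes again */
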
 void Camera::compute_auto_viewplane()
 {
-  if (type == CAMERA_PANORAMA) {
+  if (camera_type == CAMERA_PANORAMA) {
     viewplane.left = 0.0f;
     viewplane.right = 1.0f;
     viewplane.bottom = 0.0f;
     viewplane.top = 1.0f;
   }
   else {
     float aspect = (float)width / (float)height;
     if (width >= height) {
@@ void Camera::update(Scene *scene) @@ (17 unchanged lines not shown)
   if (previous_need_motion != need_motion) {
     /* scene's motion model could have been changed since previous device
      * camera update this could happen for example in case when one render
      * layer has got motion pass and another not */
     need_device_update = true;
   }
-  if (!need_update)
+  if (!is_modified())
     return;
   /* Full viewport to camera border in the viewport. */
   Transform fulltoborder = transform_from_viewplane(viewport_camera_border);
   Transform bordertofull = transform_inverse(fulltoborder);
   /* ndc to raster */
   Transform ndctoraster = transform_scale(width, height, 1.0f) * bordertofull;
   Transform full_ndctoraster = transform_scale(full_width, full_height, 1.0f) * bordertofull;
   /* raster to screen */
   Transform screentondc = fulltoborder * transform_from_viewplane(viewplane);
   Transform screentoraster = ndctoraster * screentondc;
   Transform rastertoscreen = transform_inverse(screentoraster);
   Transform full_screentoraster = full_ndctoraster * screentondc;
   Transform full_rastertoscreen = transform_inverse(full_screentoraster);
   /* screen to camera */
   ProjectionTransform cameratoscreen;
-  if (type == CAMERA_PERSPECTIVE)
+  if (camera_type == CAMERA_PERSPECTIVE)
     cameratoscreen = projection_perspective(fov, nearclip, farclip);
-  else if (type == CAMERA_ORTHOGRAPHIC)
+  else if (camera_type == CAMERA_ORTHOGRAPHIC)
     cameratoscreen = projection_orthographic(nearclip, farclip);
   else
     cameratoscreen = projection_identity();
   ProjectionTransform screentocamera = projection_inverse(cameratoscreen);
   rastertocamera = screentocamera * rastertoscreen;
   full_rastertocamera = screentocamera * full_rastertoscreen;
   cameratoraster = screentoraster * cameratoscreen;
   cameratoworld = matrix;
   screentoworld = cameratoworld * screentocamera;
   rastertoworld = cameratoworld * rastertocamera;
   ndctoworld = rastertoworld * ndctoraster;
   /* note we recompose matrices instead of taking inverses of the above, this
    * is needed to avoid inverting near degenerate matrices that happen due to
    * precision issues with large scenes */
   worldtocamera = transform_inverse(matrix);
   worldtoscreen = cameratoscreen * worldtocamera;
   worldtondc = screentondc * worldtoscreen;
   worldtoraster = ndctoraster * worldtondc;
   /* differentials */
-  if (type == CAMERA_ORTHOGRAPHIC) {
+  if (camera_type == CAMERA_ORTHOGRAPHIC) {
     dx = transform_perspective_direction(&rastertocamera, make_float3(1, 0, 0));
     dy = transform_perspective_direction(&rastertocamera, make_float3(0, 1, 0));
     full_dx = transform_perspective_direction(&full_rastertocamera, make_float3(1, 0, 0));
     full_dy = transform_perspective_direction(&full_rastertocamera, make_float3(0, 1, 0));
   }
-  else if (type == CAMERA_PERSPECTIVE) {
+  else if (camera_type == CAMERA_PERSPECTIVE) {
     dx = transform_perspective(&rastertocamera, make_float3(1, 0, 0)) -
          transform_perspective(&rastertocamera, make_float3(0, 0, 0));
     dy = transform_perspective(&rastertocamera, make_float3(0, 1, 0)) -
          transform_perspective(&rastertocamera, make_float3(0, 0, 0));
     full_dx = transform_perspective(&full_rastertocamera, make_float3(1, 0, 0)) -
               transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
     full_dy = transform_perspective(&full_rastertocamera, make_float3(0, 1, 0)) -
               transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
   }
   else {
     dx = make_float3(0.0f, 0.0f, 0.0f);
     dy = make_float3(0.0f, 0.0f, 0.0f);
   }
   dx = transform_direction(&cameratoworld, dx);
   dy = transform_direction(&cameratoworld, dy);
   full_dx = transform_direction(&cameratoworld, full_dx);
   full_dy = transform_direction(&cameratoworld, full_dy);
-  if (type == CAMERA_PERSPECTIVE) {
+  if (camera_type == CAMERA_PERSPECTIVE) {
     float3 v = transform_perspective(&full_rastertocamera,
                                      make_float3(full_width, full_height, 1.0f));
     frustum_right_normal = normalize(make_float3(v.z, 0.0f, -v.x));
     frustum_top_normal = normalize(make_float3(0.0f, v.z, -v.y));
   }
   /* Compute kernel camera data. */
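
Note on the hunk above: the world-space matrices are recomposed from their individual factors instead of inverting the already-multiplied chain, because with large scene coordinates the composed matrices become nearly singular and a direct inverse loses precision. Schematically, using the names from the code (illustrative only, not additional code in the patch):

  /* Stable: invert only the well-conditioned factors, then re-multiply. */
  worldtocamera = transform_inverse(matrix);
  worldtoscreen = cameratoscreen * worldtocamera;
  worldtoraster = ndctoraster * screentondc * worldtoscreen;
  /* Avoided: worldtoraster = projection_inverse(rastertoworld);
   * inverting the near-degenerate composite amplifies floating-point error. */
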
@@ void Camera::update(Scene *scene) @@ (18 unchanged lines not shown)
   /* Test if any of the transforms are actually different. */
   bool have_motion = false;
   for (size_t i = 0; i < motion.size(); i++) {
     have_motion = have_motion || motion[i] != matrix;
   }
   if (need_motion == Scene::MOTION_PASS) {
     /* TODO(sergey): Support perspective (zoom, fov) motion. */
-    if (type == CAMERA_PANORAMA) {
+    if (camera_type == CAMERA_PANORAMA) {
       if (have_motion) {
         kcam->motion_pass_pre = transform_inverse(motion[0]);
         kcam->motion_pass_post = transform_inverse(motion[motion.size() - 1]);
       }
       else {
         kcam->motion_pass_pre = kcam->worldtocamera;
         kcam->motion_pass_post = kcam->worldtocamera;
       }
@@ void Camera::update(Scene *scene) @@ (12 unchanged lines not shown)
   else if (need_motion == Scene::MOTION_BLUR) {
     if (have_motion) {
       kernel_camera_motion.resize(motion.size());
       transform_motion_decompose(kernel_camera_motion.data(), motion.data(), motion.size());
       kcam->num_motion_steps = motion.size();
     }
     /* TODO(sergey): Support other types of camera. */
-    if (use_perspective_motion && type == CAMERA_PERSPECTIVE) {
+    if (use_perspective_motion && camera_type == CAMERA_PERSPECTIVE) {
       /* TODO(sergey): Move to an utility function and de-duplicate with
        * calculation above.
        */
       ProjectionTransform screentocamera_pre = projection_inverse(
           projection_perspective(fov_pre, nearclip, farclip));
       ProjectionTransform screentocamera_post = projection_inverse(
           projection_perspective(fov_post, nearclip, farclip));
       kcam->perspective_pre = screentocamera_pre * rastertoscreen;
       kcam->perspective_post = screentocamera_post * rastertoscreen;
       kcam->have_perspective_motion = 1;
     }
   }
   /* depth of field */
   kcam->aperturesize = aperturesize;
   kcam->focaldistance = focaldistance;
   kcam->blades = (blades < 3) ? 0.0f : blades;
   kcam->bladesrotation = bladesrotation;
   /* motion blur */
   kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime : -1.0f;
   /* type */
-  kcam->type = type;
+  kcam->type = camera_type;
   /* anamorphic lens bokeh */
   kcam->inv_aperture_ratio = 1.0f / aperture_ratio;
   /* panorama */
   kcam->panorama_type = panorama_type;
   kcam->fisheye_fov = fisheye_fov;
   kcam->fisheye_lens = fisheye_lens;
@@ void Camera::update(Scene *scene) @@ (45 unchanged lines not shown)
   /* Camera in volume. */
   kcam->is_inside_volume = 0;
   /* Rolling shutter effect */
   kcam->rolling_shutter_type = rolling_shutter_type;
   kcam->rolling_shutter_duration = rolling_shutter_duration;
   /* Set further update flags */
-  need_update = false;
+  clear_modified();
   need_device_update = true;
   need_flags_update = true;
   previous_need_motion = need_motion;
 }
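
Note: update() now keys off the Node-level modified state instead of the removed need_update flag: is_modified() gates the recompute at the top of the function, clear_modified() resets it here, and need_device_update / need_flags_update are still raised for the device-side copy. An illustrative caller-side flow, not code from this patch:

  if (cam->is_modified()) {
    cam->update(scene);                         /* host-side recompute, then clear_modified() */
  }
  if (cam->need_device_update) {
    cam->device_update(device, dscene, scene);  /* pushes kernel_camera to the device */
  }
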
 void Camera::device_update(Device * /* device */, DeviceScene *dscene, Scene *scene)
 {
   update(scene);
@@ ... @@ (66 unchanged lines not shown)
 }
 void Camera::device_free(Device * /*device*/, DeviceScene *dscene, Scene *scene)
 {
   scene->lookup_tables->remove_table(&shutter_table_offset);
   dscene->camera_motion.free();
 }
-bool Camera::modified(const Camera &cam)
-{
-  return !Node::equals(cam);
-}
-bool Camera::motion_modified(const Camera &cam)
-{
-  return !((motion == cam.motion) && (use_perspective_motion == cam.use_perspective_motion));
-}
 void Camera::tag_update()
 {
-  need_update = true;
+  socket_modified = ~0;
 }
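
Note: socket_modified is a per-socket bitmask, so assigning ~0 marks every socket as changed and makes the next is_modified() call return true, which matches what the old need_update = true did. Illustrative semantics only; the mask width and helper behaviour are assumptions about the Node side:

  uint64_t socket_modified = 0;  /* nothing changed                    */
  socket_modified |= 1ull << 3;  /* e.g. a setter flags one socket     */
  socket_modified = ~0ull;       /* tag_update(): everything is dirty  */
  socket_modified = 0;           /* clear_modified(): clean again      */
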
 float3 Camera::transform_raster_to_world(float raster_x, float raster_y)
 {
   float3 D, P;
-  if (type == CAMERA_PERSPECTIVE) {
+  if (camera_type == CAMERA_PERSPECTIVE) {
     D = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
     float3 Pclip = normalize(D);
     P = make_float3(0.0f, 0.0f, 0.0f);
     /* TODO(sergey): Aperture support? */
     P = transform_point(&cameratoworld, P);
     D = normalize(transform_direction(&cameratoworld, D));
     /* TODO(sergey): Clipping is conditional in kernel, and hence it could
      * be mistakes in here, currently leading to wrong camera-in-volume
      * detection.
      */
     P += nearclip * D / Pclip.z;
   }
-  else if (type == CAMERA_ORTHOGRAPHIC) {
+  else if (camera_type == CAMERA_ORTHOGRAPHIC) {
     D = make_float3(0.0f, 0.0f, 1.0f);
     /* TODO(sergey): Aperture support? */
     P = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
     P = transform_point(&cameratoworld, P);
     D = normalize(transform_direction(&cameratoworld, D));
   }
   else {
     assert(!"unsupported camera type");
   }
   return P;
 }
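
transform_raster_to_world() maps a raster-space pixel coordinate to a world-space point at the near clip distance (for orthographic cameras, on the camera plane); viewplane_bounds_get() below calls it for the four frame corners and, for perspective cameras, the frame centre. For example:

  /* World-space position of the frame centre, as used a few lines below. */
  float3 center = cam->transform_raster_to_world(0.5f * cam->width, 0.5f * cam->height);
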
 BoundBox Camera::viewplane_bounds_get()
 {
   /* TODO(sergey): This is all rather stupid, but is there a way to perform
    * checks we need in a more clear and smart fashion? */
   BoundBox bounds = BoundBox::empty;
-  if (type == CAMERA_PANORAMA) {
+  if (camera_type == CAMERA_PANORAMA) {
     if (use_spherical_stereo == false) {
       bounds.grow(make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w));
     }
     else {
       float half_eye_distance = interocular_distance * 0.5f;
       bounds.grow(make_float3(
           cameratoworld.x.w + half_eye_distance, cameratoworld.y.w, cameratoworld.z.w));
       bounds.grow(make_float3(
           cameratoworld.z.w, cameratoworld.y.w + half_eye_distance, cameratoworld.z.w));
       bounds.grow(make_float3(
           cameratoworld.x.w - half_eye_distance, cameratoworld.y.w, cameratoworld.z.w));
       bounds.grow(make_float3(
           cameratoworld.x.w, cameratoworld.y.w - half_eye_distance, cameratoworld.z.w));
     }
   }
   else {
     bounds.grow(transform_raster_to_world(0.0f, 0.0f));
     bounds.grow(transform_raster_to_world(0.0f, (float)height));
     bounds.grow(transform_raster_to_world((float)width, (float)height));
     bounds.grow(transform_raster_to_world((float)width, 0.0f));
-    if (type == CAMERA_PERSPECTIVE) {
+    if (camera_type == CAMERA_PERSPECTIVE) {
       /* Center point has the most distance in local Z axis,
        * use it to construct bounding box/
        */
       bounds.grow(transform_raster_to_world(0.5f * width, 0.5f * height));
     }
   }
   return bounds;
 }
 float Camera::world_to_raster_size(float3 P)
 {
   float res = 1.0f;
-  if (type == CAMERA_ORTHOGRAPHIC) {
+  if (camera_type == CAMERA_ORTHOGRAPHIC) {
     res = min(len(full_dx), len(full_dy));
     if (offscreen_dicing_scale > 1.0f) {
       float3 p = transform_point(&worldtocamera, P);
       float3 v = transform_perspective(&full_rastertocamera,
                                        make_float3(full_width, full_height, 0.0f));
       /* Create point clamped to frustum */
       float3 c;
       c.x = max(-v.x, min(v.x, p.x));
       c.y = max(-v.y, min(v.y, p.y));
       c.z = max(0.0f, p.z);
       float f_dist = len(p - c) / sqrtf((v.x * v.x + v.y * v.y) * 0.5f);
       if (f_dist > 0.0f) {
         res += res * f_dist * (offscreen_dicing_scale - 1.0f);
       }
     }
   }
-  else if (type == CAMERA_PERSPECTIVE) {
+  else if (camera_type == CAMERA_PERSPECTIVE) {
     /* Calculate as if point is directly ahead of the camera. */
     float3 raster = make_float3(0.5f * full_width, 0.5f * full_height, 0.0f);
     float3 Pcamera = transform_perspective(&full_rastertocamera, raster);
     /* dDdx */
     float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
     float3 dx = len_squared(full_dx) < len_squared(full_dy) ? full_dx : full_dy;
     float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);
@@ if (offscreen_dicing_scale > 1.0f) { @@ (60 unchanged lines not shown)
         }
       }
       if (f_dist > 0.0f) {
         res += len(dDdx - dot(dDdx, D) * D) * f_dist * (offscreen_dicing_scale - 1.0f);
       }
     }
   }
-  else if (type == CAMERA_PANORAMA) {
+  else if (camera_type == CAMERA_PANORAMA) {
     float3 D = transform_point(&worldtocamera, P);
     float dist = len(D);
     Ray ray = {{0}};
     /* Distortion can become so great that the results become meaningless, there
      * may be a better way to do this, but calculating differentials from the
      * point directly ahead seems to produce good enough results. */
@@ ... @@ (last 59 lines unchanged, not shown)
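
For reference, the offscreen dicing falloff used in world_to_raster_size() above grows the raster size linearly with the normalized distance from the view frustum:

  /* Worked example of res += res * f_dist * (offscreen_dicing_scale - 1.0f):
   *   res = 1.0, f_dist = 1.0, offscreen_dicing_scale = 4.0
   *   res += 1.0 * 1.0 * 3.0  ->  res = 4.0  (4x coarser dicing well off screen)
   * A point inside the frustum has f_dist = 0 and keeps its on-screen rate. */
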