#ifndef CAMERA_RESPONSE_RSH
#define CAMERA_RESPONSE_RSH

// Adapted from //depot/google3/geo/lightfield/refocus/image/camera_response.h|cc
// to RenderScript.
//
// Implementation of a three-parameter Camera Response Function as described in:
//
//     Comparametric Equations with Practical Applications
//     in Quantigraphic Image Processing,
//     by Steve Mann, IEEE Transactions on Image Processing, 2000.
//
// The difference here is that we use a normalized model such that
// f(0) = 0 and f(1) = 1. The function and its inverse are given
// with respect to the parameters a, b, c as:
//
//     f(q) = ((e^b + 1) * q^a / (e^b * q^a + 1))^c
//     f^-1(q) = (q^(1/c) / (e^b - e^b * q^(1/c) + 1))^(1/a)
//
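// As a quick sanity check of the normalization (an added note, not part of
// the original comment): for a, c > 0, plugging the endpoints into f gives
//
//     f(0) = ((e^b + 1) * 0 / (e^b * 0 + 1))^c = 0
//     f(1) = ((e^b + 1) * 1 / (e^b * 1 + 1))^c = 1
//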
// The typical workflow is to first remove the response from a photograph,
// do some computation in the radiance space, and then re-apply the response
// to the processed photo (see the usage sketches near the end of this file).

// Length of the lookup tables: a float in [0, 1] is quantized to an integer
// in [0, kLUTLength - 1].
static const int kLUTLength = 256;
// Default parameters of the camera response function.
static const float kDefaultA = 2.0f;
static const float kDefaultB = 4.0f;
static const float kDefaultC = 3.0f;

// Float LUTs set from the host side. ApplyLUT_FloatAlloc and
// ApplyLUT_FloatRemoveAlloc read indices up to kLUTLength, so each allocation
// is expected to hold kLUTLength + 1 floats.
rs_allocation g_lut_apply_crf_float;
rs_allocation g_lut_remove_crf_float;

typedef struct CameraResponse {
  float a;
  float b;
  float c;
  // The float LUTs carry one extra boundary entry so that ApplyLUT_Float can
  // safely read lut[pos + 1] when the input value is exactly 1.0.
  float lut_apply_crf_float[kLUTLength + 1];
  float lut_remove_crf_float[kLUTLength + 1];
  int lut_apply_crf_int[kLUTLength];
  int lut_remove_crf_int[kLUTLength];
} CameraResponse_t;

static inline float ApplyCRF(float v, const CameraResponse_t* camera_response) {
  const float pow_va = pow(v, camera_response->a);
  const float exp_b = exp(camera_response->b);
  const float x1 = (exp_b + 1.0f) * pow_va;
  const float x2 = exp_b * pow_va + 1.0f;
  return pow(x1 / x2, camera_response->c);
}

static inline float RemoveCRF(float v,
                              const CameraResponse_t* camera_response) {
  const float pow_vc = pow(v, 1.0f / camera_response->c);
  const float x2 = exp(camera_response->b) * (1.0f - pow_vc) + 1.0f;
  return pow(pow_vc / x2, 1.0f / camera_response->a);
}

static inline float ApplyCRFdefault(float v) {
  const float pow_va = pow(v, kDefaultA);
  const float exp_b = exp(kDefaultB);
  const float x1 = (exp_b + 1.0f) * pow_va;
  const float x2 = exp_b * pow_va + 1.0f;
  return pow(x1 / x2, kDefaultC);
}

static inline float RemoveCRFdefault(float v) {
  const float pow_vc = pow(v, 1.0f / kDefaultC);
  const float x2 = exp(kDefaultB) * (1.0f - pow_vc) + 1.0f;
  return pow(pow_vc / x2, 1.0f / kDefaultA);
}

static inline void ComputeLUTs(CameraResponse_t* camera_response) {
  for (int i = 0; i < kLUTLength; ++i) {
    const float value = i / (float)(kLUTLength - 1);

    const float y = ApplyCRF(value, camera_response);
    camera_response->lut_apply_crf_float[i] = y;
    camera_response->lut_apply_crf_int[i] = round((kLUTLength - 1) * y);

    const float x = RemoveCRF(value, camera_response);
    camera_response->lut_remove_crf_float[i] = x;
    camera_response->lut_remove_crf_int[i] = round((kLUTLength - 1) * x);
  }

  // Duplicate the last entry as a boundary so that ApplyLUT_Float correctly
  // handles the maximum input value (pos == kLUTLength - 1 reads pos + 1).
  camera_response->lut_apply_crf_float[kLUTLength] =
      camera_response->lut_apply_crf_float[kLUTLength - 1];
  camera_response->lut_remove_crf_float[kLUTLength] =
      camera_response->lut_remove_crf_float[kLUTLength - 1];
}

// Linearly interpolates between adjacent LUT entries; value is assumed to be
// in [0, 1].
static inline float ApplyLUT_Float(float value, float lut[kLUTLength + 1]) {
  const float scaled_value = (kLUTLength - 1) * value;
  const int pos = (int)(scaled_value);
  const float delta = scaled_value - pos;
  return lut[pos] + delta * (lut[pos + 1] - lut[pos]);
}

// Same interpolation as ApplyLUT_Float, but reading from the global
// g_lut_apply_crf_float allocation.
static inline float ApplyLUT_FloatAlloc(float value) {
  const float scaled_value = (kLUTLength - 1) * value;
  const int pos = (int)(scaled_value);
  const float delta = scaled_value - pos;
  float valPos = rsGetElementAt_float(g_lut_apply_crf_float, pos);
  float valPosPlus = rsGetElementAt_float(g_lut_apply_crf_float, pos + 1);
  return valPos + delta * (valPosPlus - valPos);
}

// Same interpolation as ApplyLUT_Float, but reading from the global
// g_lut_remove_crf_float allocation.
static inline float ApplyLUT_FloatRemoveAlloc(float value) {
  const float scaled_value = (kLUTLength - 1) * value;
  const int pos = (int)(scaled_value);
  const float delta = scaled_value - pos;
  float valPos = rsGetElementAt_float(g_lut_remove_crf_float, pos);
  float valPosPlus = rsGetElementAt_float(g_lut_remove_crf_float, pos + 1);
  return valPos + delta * (valPosPlus - valPos);
}

static inline int ApplyLUT_Int(int value, int lut[kLUTLength]) {
  return lut[value];
}
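
// Usage sketch (added for illustration; ExampleApplyCRFviaLUT is a
// hypothetical helper, not part of the original header). Assuming the LUTs in
// camera_response have been filled via ComputeLUTs(), the interpolated float
// LUT gives a cheap approximation of ApplyCRF for v in [0, 1]. For an 8-bit
// channel value p in [0, 255] (kLUTLength is 256), the integer table can be
// indexed directly instead: ApplyLUT_Int(p, camera_response->lut_apply_crf_int).
static inline float ExampleApplyCRFviaLUT(float v,
                                          CameraResponse_t* camera_response) {
  return ApplyLUT_Float(v, camera_response->lut_apply_crf_float);
}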

static inline void InitializeDefaultCameraResponse(
    CameraResponse_t* camera_response) {
  camera_response->a = kDefaultA;
  camera_response->b = kDefaultB;
  camera_response->c = kDefaultC;
  ComputeLUTs(camera_response);
}
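
// Workflow sketch (added for illustration; ExampleProcessPixel and its gain
// parameter are hypothetical, not part of the original header). It follows the
// "remove the response, compute in radiance space, re-apply the response"
// workflow described at the top of this file, using a simple exposure gain as
// the processing step.
static inline float ExampleProcessPixel(float v, float gain,
                                        const CameraResponse_t* camera_response) {
  // Map the pixel value from photo space back to (approximately) linear
  // radiance.
  const float radiance = RemoveCRF(v, camera_response);
  // Do some computation in radiance space; here, a gain clamped to [0, 1].
  const float processed = clamp(radiance * gain, 0.0f, 1.0f);
  // Re-apply the camera response to return to photo space.
  return ApplyCRF(processed, camera_response);
}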

#endif