| 1 | #include "imager.h" |
| 2 | #include "imageri.h" |
| 3 | |
| 4 | /* |
| 5 | * i_scale_mixing() is based on code contained in pnmscale.c, part of |
| 6 | * the netpbm distribution. No code was copied from pnmscale but |
 * the algorithm was, and for this I thank the netpbm crew.
| 8 | * |
| 9 | * Tony |
| 10 | */ |
| 11 | |
| 12 | /* pnmscale.c - read a portable anymap and scale it |
| 13 | ** |
| 14 | ** Copyright (C) 1989, 1991 by Jef Poskanzer. |
| 15 | ** |
| 16 | ** Permission to use, copy, modify, and distribute this software and its |
| 17 | ** documentation for any purpose and without fee is hereby granted, provided |
| 18 | ** that the above copyright notice appear in all copies and that both that |
| 19 | ** copyright notice and this permission notice appear in supporting |
| 20 | ** documentation. This software is provided "as is" without express or |
| 21 | ** implied warranty. |
| 22 | ** |
| 23 | */ |
| 24 | |
| 25 | |
| 26 | static void |
| 27 | zero_row(i_fcolor *row, i_img_dim width, int channels); |
| 28 | |
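/* The #code ... #/code sections below are expanded by Imager's
   preprocessor into an 8-bit sample variant and a double sample
   variant; IM_COLOR, IM_SUFFIX() and the other IM_* names are
   substituted appropriately for each variant, and the expression on
   the second #code line below selects which variant runs for a given
   source image. */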
| 29 | #code |
| 30 | static void |
| 31 | IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in, |
| 32 | i_img_dim width, int channels); |
| 33 | static void |
| 34 | IM_SUFFIX(horizontal_scale)(IM_COLOR *out, i_img_dim out_width, |
| 35 | i_fcolor const *in, i_img_dim in_width, |
| 36 | int channels); |
| 37 | #/code |
| 38 | |
| 39 | /* |
| 40 | =item i_scale_mixing |
| 41 | |
| 42 | Returns a new image scaled to the given size. |
| 43 | |
Unlike i_scale_axis() this mixes together the source pixels that cover
each output pixel rather than resampling.
| 46 | |
| 47 | Adapted from pnmscale. |
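
A minimal usage sketch (the output dimensions here are arbitrary; the
caller owns the returned image and releases it with i_img_destroy()):

  i_img *out = i_scale_mixing(src, 160, 120);
  if (out == NULL) {
    /* the reason for the failure is on the Imager error stack */
  }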
| 48 | |
| 49 | =cut |
| 50 | */ |
| 51 | i_img * |
| 52 | i_scale_mixing(i_img *src, i_img_dim x_out, i_img_dim y_out) { |
| 53 | i_img *result = NULL; |
| 54 | i_fcolor *accum_row = NULL; |
| 55 | i_img_dim x, y; |
| 56 | int ch; |
| 57 | size_t accum_row_bytes; |
| 58 | double rowsleft, fracrowtofill; |
| 59 | i_img_dim rowsread; |
| 60 | double y_scale; |
| 61 | |
| 62 | mm_log((1, "i_scale_mixing(src %p, out(" i_DFp "))\n", |
| 63 | src, i_DFcp(x_out, y_out))); |
| 64 | |
| 65 | i_clear_error(); |
| 66 | |
| 67 | if (x_out <= 0) { |
| 68 | i_push_errorf(0, "output width %" i_DF " invalid", i_DFc(x_out)); |
| 69 | return NULL; |
| 70 | } |
| 71 | if (y_out <= 0) { |
| 72 | i_push_errorf(0, "output height %" i_DF " invalid", i_DFc(y_out)); |
| 73 | return NULL; |
| 74 | } |
| 75 | |
| 76 | if (x_out == src->xsize && y_out == src->ysize) { |
| 77 | return i_copy(src); |
| 78 | } |
| 79 | |
| 80 | y_scale = y_out / (double)src->ysize; |
| 81 | |
| 82 | accum_row_bytes = sizeof(i_fcolor) * src->xsize; |
| 83 | if (accum_row_bytes / sizeof(i_fcolor) != src->xsize) { |
| 84 | i_push_error(0, "integer overflow allocating accumulator row buffer"); |
| 85 | return NULL; |
| 86 | } |
| 87 | |
| 88 | result = i_sametype_chans(src, x_out, y_out, src->channels); |
| 89 | if (!result) |
| 90 | return NULL; |
| 91 | |
| 92 | accum_row = mymalloc(accum_row_bytes); |
| 93 | |
| 94 | #code src->bits <= 8 |
| 95 | IM_COLOR *in_row = NULL; |
| 96 | IM_COLOR *xscale_row = NULL; |
| 97 | size_t in_row_bytes, out_row_bytes; |
| 98 | |
| 99 | in_row_bytes = sizeof(IM_COLOR) * src->xsize; |
| 100 | if (in_row_bytes / sizeof(IM_COLOR) != src->xsize) { |
| 101 | myfree(accum_row); |
| 102 | i_img_destroy(result); |
| 103 | i_push_error(0, "integer overflow allocating input row buffer"); |
| 104 | return NULL; |
| 105 | } |
| 106 | out_row_bytes = sizeof(IM_COLOR) * x_out; |
| 107 | if (out_row_bytes / sizeof(IM_COLOR) != x_out) { |
| 108 | myfree(accum_row); |
| 109 | i_img_destroy(result); |
| 110 | i_push_error(0, "integer overflow allocating output row buffer"); |
| 111 | return NULL; |
| 112 | } |
| 113 | |
| 114 | in_row = mymalloc(in_row_bytes); |
| 115 | xscale_row = mymalloc(out_row_bytes); |
| 116 | |
| 117 | rowsread = 0; |
| 118 | rowsleft = 0.0; |
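  /* Mix source rows into each output row: each source row supplies
     y_scale output rows worth of coverage (rowsleft tracks how much of
     the current source row is still unconsumed), while fracrowtofill
     tracks how much of the current output row remains to be filled. */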
| 119 | for (y = 0; y < y_out; ++y) { |
| 120 | if (y_out == src->ysize) { |
| 121 | /* no vertical scaling, just load it */ |
| 122 | #ifdef IM_EIGHT_BIT |
| 123 | i_img_dim x; |
| 124 | int ch; |
| 125 | /* load and convert to doubles */ |
| 126 | IM_GLIN(src, 0, src->xsize, y, in_row); |
| 127 | for (x = 0; x < src->xsize; ++x) { |
| 128 | for (ch = 0; ch < src->channels; ++ch) { |
| 129 | accum_row[x].channel[ch] = in_row[x].channel[ch]; |
| 130 | } |
| 131 | } |
| 132 | #else |
| 133 | IM_GLIN(src, 0, src->xsize, y, accum_row); |
| 134 | #endif |
      /* if there's an alpha channel, premultiply the color samples by
	 alpha so transparent pixels don't tint the mixed result; the
	 alpha is divided back out when the row is stored */
| 136 | if (src->channels == 2 || src->channels == 4) { |
| 137 | for (x = 0; x < src->xsize; ++x) { |
| 138 | for (ch = 0; ch < src->channels-1; ++ch) { |
| 139 | accum_row[x].channel[ch] *= |
| 140 | accum_row[x].channel[src->channels-1] / IM_SAMPLE_MAX; |
| 141 | } |
| 142 | } |
| 143 | } |
| 144 | } |
| 145 | else { |
| 146 | fracrowtofill = 1.0; |
| 147 | zero_row(accum_row, src->xsize, src->channels); |
| 148 | while (fracrowtofill > 0) { |
| 149 | if (rowsleft <= 0) { |
| 150 | if (rowsread < src->ysize) { |
| 151 | IM_GLIN(src, 0, src->xsize, rowsread, in_row); |
| 152 | ++rowsread; |
| 153 | } |
| 154 | /* else just use the last row read */ |
| 155 | |
| 156 | rowsleft = y_scale; |
| 157 | } |
| 158 | if (rowsleft < fracrowtofill) { |
| 159 | IM_SUFFIX(accum_output_row)(accum_row, rowsleft, in_row, |
| 160 | src->xsize, src->channels); |
| 161 | fracrowtofill -= rowsleft; |
| 162 | rowsleft = 0; |
| 163 | } |
| 164 | else { |
| 165 | IM_SUFFIX(accum_output_row)(accum_row, fracrowtofill, in_row, |
| 166 | src->xsize, src->channels); |
| 167 | rowsleft -= fracrowtofill; |
| 168 | fracrowtofill = 0; |
| 169 | } |
| 170 | } |
| 171 | } |
| 172 | /* we've accumulated a vertically scaled row */ |
| 173 | if (x_out == src->xsize) { |
#ifdef IM_EIGHT_BIT
| 175 | i_img_dim x; |
| 176 | int ch; |
| 177 | /* no need to scale, but we need to convert it */ |
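      /* the accumulated samples are alpha-premultiplied, so divide
	 the alpha back out before storing the color channels */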
| 178 | if (result->channels == 2 || result->channels == 4) { |
| 179 | int alpha_chan = result->channels - 1; |
| 180 | for (x = 0; x < x_out; ++x) { |
| 181 | double alpha = accum_row[x].channel[alpha_chan] / IM_SAMPLE_MAX; |
| 182 | if (alpha) { |
| 183 | for (ch = 0; ch < alpha_chan; ++ch) { |
| 184 | int val = accum_row[x].channel[ch] / alpha + 0.5; |
| 185 | xscale_row[x].channel[ch] = IM_LIMIT(val); |
| 186 | } |
| 187 | } |
| 188 | else { |
| 189 | /* rather than leaving any color data as whatever was |
| 190 | originally in the buffer, set it to black. This isn't |
| 191 | any more correct, but it gives us more compressible |
| 192 | image data. |
| 193 | RT #32324 |
| 194 | */ |
| 195 | for (ch = 0; ch < alpha_chan; ++ch) { |
| 196 | xscale_row[x].channel[ch] = 0; |
| 197 | } |
| 198 | } |
| 199 | xscale_row[x].channel[alpha_chan] = IM_LIMIT(accum_row[x].channel[alpha_chan]+0.5); |
| 200 | } |
| 201 | } |
| 202 | else { |
| 203 | for (x = 0; x < x_out; ++x) { |
| 204 | for (ch = 0; ch < result->channels; ++ch) |
| 205 | xscale_row[x].channel[ch] = IM_LIMIT(accum_row[x].channel[ch]+0.5); |
| 206 | } |
| 207 | } |
| 208 | IM_PLIN(result, 0, x_out, y, xscale_row); |
| 209 | #else |
| 210 | IM_PLIN(result, 0, x_out, y, accum_row); |
| 211 | #endif |
| 212 | } |
| 213 | else { |
| 214 | IM_SUFFIX(horizontal_scale)(xscale_row, x_out, accum_row, |
| 215 | src->xsize, src->channels); |
| 216 | IM_PLIN(result, 0, x_out, y, xscale_row); |
| 217 | } |
| 218 | } |
| 219 | myfree(in_row); |
| 220 | myfree(xscale_row); |
| 221 | #/code |
| 222 | myfree(accum_row); |
| 223 | |
| 224 | return result; |
| 225 | } |
| 226 | |
| 227 | static void |
| 228 | zero_row(i_fcolor *row, i_img_dim width, int channels) { |
| 229 | i_img_dim x; |
| 230 | int ch; |
| 231 | |
  /* With IEEE floats we could just memset() the buffer, and that would
     be slightly faster, but ANSI C doesn't guarantee that all-bits-zero
     represents 0.0 for floating point types, so zero it explicitly.
  */
| 236 | for (x = 0; x < width; ++x) { |
| 237 | for (ch = 0; ch < channels; ++ch) |
| 238 | row[x].channel[ch] = 0.0; |
| 239 | } |
| 240 | } |
| 241 | |
| 242 | #code |
| 243 | |
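/*
 * Accumulate "fraction" of each pixel in in[] into the accumulator row
 * accum[].  For 2 and 4 channel images the color samples are weighted
 * by their alpha (premultiplied) so fully transparent pixels don't
 * tint the mix; the alpha channel itself is accumulated unweighted.
 */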
| 244 | static void |
| 245 | IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in, |
| 246 | i_img_dim width, int channels) { |
| 247 | i_img_dim x; |
| 248 | int ch; |
| 249 | |
| 250 | /* it's tempting to change this into a pointer iteration loop but |
| 251 | modern CPUs do the indexing as part of the instruction */ |
| 252 | if (channels == 2 || channels == 4) { |
| 253 | for (x = 0; x < width; ++x) { |
| 254 | for (ch = 0; ch < channels-1; ++ch) { |
| 255 | accum[x].channel[ch] += in[x].channel[ch] * fraction * in[x].channel[channels-1] / IM_SAMPLE_MAX; |
| 256 | } |
| 257 | accum[x].channel[channels-1] += in[x].channel[channels-1] * fraction; |
| 258 | } |
| 259 | } |
| 260 | else { |
| 261 | for (x = 0; x < width; ++x) { |
| 262 | for (ch = 0; ch < channels; ++ch) { |
| 263 | accum[x].channel[ch] += in[x].channel[ch] * fraction; |
| 264 | } |
| 265 | } |
| 266 | } |
| 267 | } |
| 268 | |
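/*
 * Horizontally scale one accumulated row from in_width to out_width by
 * pixel mixing: walk the source columns, spreading each column's
 * x_scale worth of coverage across the output columns, and flush an
 * output pixel whenever its coverage reaches 1.0.  For 2 and 4 channel
 * images the accumulated color is divided by the accumulated alpha
 * before being stored (with the alpha == 0 case set to black, see
 * RT #32324).
 */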
| 269 | static void |
| 270 | IM_SUFFIX(horizontal_scale)(IM_COLOR *out, i_img_dim out_width, |
| 271 | i_fcolor const *in, i_img_dim in_width, |
| 272 | int channels) { |
| 273 | double frac_col_to_fill, frac_col_left; |
| 274 | i_img_dim in_x; |
| 275 | i_img_dim out_x; |
| 276 | double x_scale = (double)out_width / in_width; |
| 277 | int ch; |
| 278 | double accum[MAXCHANNELS] = { 0 }; |
| 279 | |
| 280 | frac_col_to_fill = 1.0; |
| 281 | out_x = 0; |
| 282 | for (in_x = 0; in_x < in_width; ++in_x) { |
| 283 | frac_col_left = x_scale; |
| 284 | while (frac_col_left >= frac_col_to_fill) { |
| 285 | for (ch = 0; ch < channels; ++ch) |
| 286 | accum[ch] += frac_col_to_fill * in[in_x].channel[ch]; |
| 287 | |
| 288 | if (channels == 2 || channels == 4) { |
| 289 | int alpha_chan = channels - 1; |
| 290 | double alpha = accum[alpha_chan] / IM_SAMPLE_MAX; |
| 291 | if (alpha) { |
| 292 | for (ch = 0; ch < alpha_chan; ++ch) { |
| 293 | IM_WORK_T val = IM_ROUND(accum[ch] / alpha); |
| 294 | out[out_x].channel[ch] = IM_LIMIT(val); |
| 295 | } |
| 296 | } |
| 297 | else { |
| 298 | for (ch = 0; ch < alpha_chan; ++ch) { |
| 299 | /* See RT #32324 (and mention above) */ |
| 300 | out[out_x].channel[ch] = 0; |
| 301 | } |
| 302 | } |
| 303 | out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan])); |
| 304 | } |
| 305 | else { |
| 306 | for (ch = 0; ch < channels; ++ch) { |
| 307 | IM_WORK_T val = IM_ROUND(accum[ch]); |
| 308 | out[out_x].channel[ch] = IM_LIMIT(val); |
| 309 | } |
| 310 | } |
| 311 | for (ch = 0; ch < channels; ++ch) |
| 312 | accum[ch] = 0; |
| 313 | frac_col_left -= frac_col_to_fill; |
| 314 | frac_col_to_fill = 1.0; |
| 315 | ++out_x; |
| 316 | } |
| 317 | |
| 318 | if (frac_col_left > 0) { |
| 319 | for (ch = 0; ch < channels; ++ch) { |
| 320 | accum[ch] += frac_col_left * in[in_x].channel[ch]; |
| 321 | } |
| 322 | frac_col_to_fill -= frac_col_left; |
| 323 | } |
| 324 | } |
| 325 | |
| 326 | if (out_x < out_width-1 || out_x > out_width) { |
    i_fatal(3, "Internal error: out_x %" i_DF " out of range (width %" i_DF ")",
	    i_DFc(out_x), i_DFc(out_width));
| 328 | } |
| 329 | |
| 330 | if (out_x < out_width) { |
| 331 | for (ch = 0; ch < channels; ++ch) { |
| 332 | accum[ch] += frac_col_to_fill * in[in_width-1].channel[ch]; |
| 333 | } |
| 334 | if (channels == 2 || channels == 4) { |
| 335 | int alpha_chan = channels - 1; |
| 336 | double alpha = accum[alpha_chan] / IM_SAMPLE_MAX; |
| 337 | if (alpha) { |
| 338 | for (ch = 0; ch < alpha_chan; ++ch) { |
| 339 | IM_WORK_T val = IM_ROUND(accum[ch] / alpha); |
| 340 | out[out_x].channel[ch] = IM_LIMIT(val); |
| 341 | } |
| 342 | } |
| 343 | else { |
| 344 | for (ch = 0; ch < alpha_chan; ++ch) { |
| 345 | /* See RT #32324 (and mention above) */ |
| 346 | out[out_x].channel[ch] = 0; |
| 347 | } |
| 348 | } |
| 349 | out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan])); |
| 350 | } |
| 351 | else { |
| 352 | for (ch = 0; ch < channels; ++ch) { |
| 353 | IM_WORK_T val = IM_ROUND(accum[ch]); |
| 354 | out[out_x].channel[ch] = IM_LIMIT(val); |
| 355 | } |
| 356 | } |
| 357 | } |
| 358 | } |
| 359 | |
| 360 | #/code |