 * i_scale_mixing() is based on code contained in pnmscale.c, part of
 * the netpbm distribution. No code was copied from pnmscale but
 * the algorithm was, and for this I thank the netpbm crew.
/* pnmscale.c - read a portable anymap and scale it
** Copyright (C) 1989, 1991 by Jef Poskanzer.
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation. This software is provided "as is" without express or
zero_row(i_fcolor *row, i_img_dim width, int channels);

IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in,
                            i_img_dim width, int channels);

IM_SUFFIX(horizontal_scale)(IM_COLOR *out, i_img_dim out_width,
                            i_fcolor const *in, i_img_dim in_width,
Returns a new image scaled to the given size.

Unlike i_scale_axis() this does a simple coverage of pixels from
source to target and doesn't resample.

Adapted from pnmscale.
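A minimal usage sketch (the source image "src" and the 640x480 target
size are only illustrative):

  i_img *scaled = i_scale_mixing(src, 640, 480);

On failure no image is returned and a message is pushed onto the error
stack; a successful result is released with i_img_destroy() when it is
no longer needed.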
i_scale_mixing(i_img *src, i_img_dim x_out, i_img_dim y_out) {
  i_fcolor *accum_row = NULL;
  size_t accum_row_bytes;
  double rowsleft, fracrowtofill;

  mm_log((1, "i_scale_mixing(src %p, out(" i_DFp "))\n",
          src, i_DFcp(x_out, y_out)));

    i_push_errorf(0, "output width %" i_DF " invalid", i_DFc(x_out));

    i_push_errorf(0, "output height %" i_DF " invalid", i_DFc(y_out));

  if (x_out == src->xsize && y_out == src->ysize) {

  y_scale = y_out / (double)src->ysize;
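  /* each source row supplies y_scale rows' worth of output: rowsleft
     tracks how much of the current source row is still unused and
     fracrowtofill how much of the current output row remains to be
     filled */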
  accum_row_bytes = sizeof(i_fcolor) * src->xsize;
  if (accum_row_bytes / sizeof(i_fcolor) != src->xsize) {
    i_push_error(0, "integer overflow allocating accumulator row buffer");

  result = i_sametype_chans(src, x_out, y_out, src->channels);

  accum_row = mymalloc(accum_row_bytes);
  IM_COLOR *in_row = NULL;
  IM_COLOR *xscale_row = NULL;
  size_t in_row_bytes, out_row_bytes;

  in_row_bytes = sizeof(IM_COLOR) * src->xsize;
  if (in_row_bytes / sizeof(IM_COLOR) != src->xsize) {
    i_img_destroy(result);
    i_push_error(0, "integer overflow allocating input row buffer");

  out_row_bytes = sizeof(IM_COLOR) * x_out;
  if (out_row_bytes / sizeof(IM_COLOR) != x_out) {
    i_img_destroy(result);
    i_push_error(0, "integer overflow allocating output row buffer");

  in_row = mymalloc(in_row_bytes);
  xscale_row = mymalloc(out_row_bytes);
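  /* in_row holds a source row in the image's native sample type,
     accum_row the floating point vertical mix, and xscale_row the
     converted (and possibly horizontally scaled) row written to the
     result */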
  for (y = 0; y < y_out; ++y) {
    if (y_out == src->ysize) {
      /* no vertical scaling, just load it */

      /* load and convert to doubles */
      IM_GLIN(src, 0, src->xsize, y, in_row);
      for (x = 0; x < src->xsize; ++x) {
        for (ch = 0; ch < src->channels; ++ch) {
          accum_row[x].channel[ch] = in_row[x].channel[ch];

      IM_GLIN(src, 0, src->xsize, y, accum_row);

      /* alpha adjust if needed */
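      /* the color samples are scaled by alpha here so the mixing below
         works with premultiplied color; the alpha is divided back out
         when the row is converted for output */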
      if (src->channels == 2 || src->channels == 4) {
        for (x = 0; x < src->xsize; ++x) {
          for (ch = 0; ch < src->channels-1; ++ch) {
            accum_row[x].channel[ch] *=
              accum_row[x].channel[src->channels-1] / IM_SAMPLE_MAX;

      zero_row(accum_row, src->xsize, src->channels);
      while (fracrowtofill > 0) {
        if (rowsread < src->ysize) {
          IM_GLIN(src, 0, src->xsize, rowsread, in_row);

        /* else just use the last row read */

        if (rowsleft < fracrowtofill) {
          IM_SUFFIX(accum_output_row)(accum_row, rowsleft, in_row,
                                      src->xsize, src->channels);
          fracrowtofill -= rowsleft;

          IM_SUFFIX(accum_output_row)(accum_row, fracrowtofill, in_row,
                                      src->xsize, src->channels);
          rowsleft -= fracrowtofill;

    /* we've accumulated a vertically scaled row */
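    /* if no horizontal scaling is needed we only convert the row back
       to the output sample type (dividing any alpha back out);
       otherwise horizontal_scale() does the column mixing and the
       conversion */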
    if (x_out == src->xsize) {

      /* no need to scale, but we need to convert it */
      if (result->channels == 2 || result->channels == 4) {
        int alpha_chan = result->channels - 1;
        for (x = 0; x < x_out; ++x) {
          double alpha = accum_row[x].channel[alpha_chan] / IM_SAMPLE_MAX;

            for (ch = 0; ch < alpha_chan; ++ch) {
              int val = accum_row[x].channel[ch] / alpha + 0.5;
              xscale_row[x].channel[ch] = IM_LIMIT(val);
            /* rather than leaving any color data as whatever was
               originally in the buffer, set it to black. This isn't
               any more correct, but it gives us more compressible
               image data. */
            for (ch = 0; ch < alpha_chan; ++ch) {
              xscale_row[x].channel[ch] = 0;

          xscale_row[x].channel[alpha_chan] =
            IM_LIMIT(accum_row[x].channel[alpha_chan]+0.5);
        for (x = 0; x < x_out; ++x) {
          for (ch = 0; ch < result->channels; ++ch)
            xscale_row[x].channel[ch] = IM_LIMIT(accum_row[x].channel[ch]+0.5);

      IM_PLIN(result, 0, x_out, y, xscale_row);

      IM_PLIN(result, 0, x_out, y, accum_row);

      IM_SUFFIX(horizontal_scale)(xscale_row, x_out, accum_row,
                                  src->xsize, src->channels);
      IM_PLIN(result, 0, x_out, y, xscale_row);
zero_row(i_fcolor *row, i_img_dim width, int channels) {

  /* with IEEE floats we could just use memset() here (which would be
     slightly faster), but that's not safe in general under ANSI C */
  for (x = 0; x < width; ++x) {
    for (ch = 0; ch < channels; ++ch)
      row[x].channel[ch] = 0.0;
IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in,
                            i_img_dim width, int channels) {

  /* it's tempting to change this into a pointer iteration loop but
     modern CPUs do the indexing as part of the instruction */
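  /* images with an alpha channel accumulate premultiplied color; the
     caller divides the alpha back out when converting to the output
     sample type */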
  if (channels == 2 || channels == 4) {
    for (x = 0; x < width; ++x) {
      for (ch = 0; ch < channels-1; ++ch) {
        accum[x].channel[ch] +=
          in[x].channel[ch] * fraction * in[x].channel[channels-1] / IM_SAMPLE_MAX;

      accum[x].channel[channels-1] += in[x].channel[channels-1] * fraction;

    for (x = 0; x < width; ++x) {
      for (ch = 0; ch < channels; ++ch) {
        accum[x].channel[ch] += in[x].channel[ch] * fraction;
IM_SUFFIX(horizontal_scale)(IM_COLOR *out, i_img_dim out_width,
                            i_fcolor const *in, i_img_dim in_width,

  double frac_col_to_fill, frac_col_left;

  double x_scale = (double)out_width / in_width;

  double accum[MAXCHANNELS] = { 0 };

  frac_col_to_fill = 1.0;
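  /* each source column supplies x_scale columns' worth of output;
     fractions of source columns are gathered in accum[] until a full
     output column has been covered, then the column is converted and
     stored in out[] */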
  for (in_x = 0; in_x < in_width; ++in_x) {
    frac_col_left = x_scale;
    while (frac_col_left >= frac_col_to_fill) {
      for (ch = 0; ch < channels; ++ch)
        accum[ch] += frac_col_to_fill * in[in_x].channel[ch];

      if (channels == 2 || channels == 4) {
        int alpha_chan = channels - 1;
        double alpha = accum[alpha_chan] / IM_SAMPLE_MAX;

          for (ch = 0; ch < alpha_chan; ++ch) {
            IM_WORK_T val = IM_ROUND(accum[ch] / alpha);
            out[out_x].channel[ch] = IM_LIMIT(val);

          for (ch = 0; ch < alpha_chan; ++ch) {
            /* See RT #32324 (and mention above) */
            out[out_x].channel[ch] = 0;

        out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan]));

        for (ch = 0; ch < channels; ++ch) {
          IM_WORK_T val = IM_ROUND(accum[ch]);
          out[out_x].channel[ch] = IM_LIMIT(val);
      for (ch = 0; ch < channels; ++ch)
        accum[ch] = 0; /* reset the accumulator for the next output column */
      frac_col_left -= frac_col_to_fill;
      frac_col_to_fill = 1.0;
    if (frac_col_left > 0) {
      for (ch = 0; ch < channels; ++ch) {
        accum[ch] += frac_col_left * in[in_x].channel[ch];

      frac_col_to_fill -= frac_col_left;
  if (out_x < out_width-1 || out_x > out_width) {
    i_fatal(3, "Internal error: out_x %" i_DF " out of range (width %" i_DF ")",
            i_DFc(out_x), i_DFc(out_width));
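  /* rounding error can leave the last output column only partly
     filled; top it up from the final source column */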
  if (out_x < out_width) {
    for (ch = 0; ch < channels; ++ch) {
      accum[ch] += frac_col_to_fill * in[in_width-1].channel[ch];

    if (channels == 2 || channels == 4) {
      int alpha_chan = channels - 1;
      double alpha = accum[alpha_chan] / IM_SAMPLE_MAX;

        for (ch = 0; ch < alpha_chan; ++ch) {
          IM_WORK_T val = IM_ROUND(accum[ch] / alpha);
          out[out_x].channel[ch] = IM_LIMIT(val);

        for (ch = 0; ch < alpha_chan; ++ch) {
          /* See RT #32324 (and mention above) */
          out[out_x].channel[ch] = 0;

      out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan]));

      for (ch = 0; ch < channels; ++ch) {
        IM_WORK_T val = IM_ROUND(accum[ch]);
        out[out_x].channel[ch] = IM_LIMIT(val);