5 * i_scale_mixing() is based on code contained in pnmscale.c, part of
6 * the netpbm distribution. No code was copied from pnmscale but
7 * the algorithm was and for this I thank the netpbm crew.
12 /* pnmscale.c - read a portable anymap and scale it
14 ** Copyright (C) 1989, 1991 by Jef Poskanzer.
16 ** Permission to use, copy, modify, and distribute this software and its
17 ** documentation for any purpose and without fee is hereby granted, provided
18 ** that the above copyright notice appear in all copies and that both that
19 ** copyright notice and this permission notice appear in supporting
20 ** documentation. This software is provided "as is" without express or
27 zero_row(i_fcolor *row, int width, int channels);
31 IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in,
32 int width, int channels);
34 IM_SUFFIX(horizontal_scale)(IM_COLOR *out, int out_width,
35 i_fcolor const *in, int in_width,
42 Returns a new image scaled to the given size.
44 Unlike i_scale_axis() this does a simple coverage of pixels from
45 source to target and doesn't resample.
47 Adapted from pnmscale.
/* NOTE(review): this view of the file is missing many lines (see the
   embedded original line numbers) - the code below is NOT contiguous and
   several braces/declarations fall in the gaps.  Comments describe only
   what is visible here.

   i_scale_mixing() - scale src to x_out x y_out by pixel mixing
   (coverage averaging), adapted from pnmscale.  The result image is
   allocated with i_sametype_chans() so it keeps the source's sample
   format and channel count.  Invalid sizes and allocation-size overflow
   push errors via i_push_error*(). */
52 i_scale_mixing(i_img *src, int x_out, int y_out) {
54 i_fcolor *accum_row = NULL;
57 double rowsleft, fracrowtofill;
61 mm_log((1, "i_scale_mixing(src %p, x_out %d, y_out %d)\n",
/* validate the requested output size before doing any work */
67 i_push_errorf(0, "output width %d invalid", x_out);
71 i_push_errorf(0, "output height %d invalid", y_out);
/* identical size: presumably handled as a plain copy - the branch body
   is not visible here, TODO confirm against the full file */
75 if (x_out == src->xsize && y_out == src->ysize) {
79 y_scale = y_out / (double)src->ysize;
81 result = i_sametype_chans(src, x_out, y_out, src->channels);
/* guard the multiplication used to size the accumulator row buffer */
85 accum_row_bytes = sizeof(i_fcolor) * src->xsize;
86 if (accum_row_bytes / sizeof(i_fcolor) != src->xsize) {
87 i_push_error(0, "integer overflow allocating accumulator row buffer");
91 accum_row = mymalloc(accum_row_bytes);
94 IM_COLOR *in_row = NULL;
95 IM_COLOR *xscale_row = NULL;
96 int in_row_bytes, out_row_bytes;
/* same overflow guard for the per-sample-type row buffers */
98 in_row_bytes = sizeof(IM_COLOR) * src->xsize;
99 if (in_row_bytes / sizeof(IM_COLOR) != src->xsize) {
100 i_push_error(0, "integer overflow allocating input row buffer");
103 out_row_bytes = sizeof(IM_COLOR) * x_out;
104 if (out_row_bytes / sizeof(IM_COLOR) != x_out) {
105 i_push_error(0, "integer overflow allocating output row buffer");
109 in_row = mymalloc(in_row_bytes);
110 xscale_row = mymalloc(out_row_bytes);
/* main loop: build one vertically-mixed row per output row */
114 for (y = 0; y < y_out; ++y) {
115 if (y_out == src->ysize) {
116 /* no vertical scaling, just load it */
119 /* load and convert to doubles */
120 IM_GLIN(src, 0, src->xsize, y, in_row);
121 for (x = 0; x < src->xsize; ++x) {
122 for (ch = 0; ch < src->channels; ++ch) {
123 accum_row[x].channel[ch] = in_row[x].channel[ch];
/* presumably the double-sample image case, where the row can be read
   straight into the accumulator - TODO confirm, the branch header is
   in a gap */
127 IM_GLIN(src, 0, src->xsize, y, accum_row);
129 /* alpha adjust if needed */
/* premultiply color samples by alpha (last channel) so transparent
   pixels contribute proportionally when mixed */
130 if (src->channels == 2 || src->channels == 4) {
131 for (x = 0; x < src->xsize; ++x) {
132 for (ch = 0; ch < src->channels-1; ++ch) {
133 accum_row[x].channel[ch] *=
134 accum_row[x].channel[src->channels-1] / IM_SAMPLE_MAX;
/* vertical scaling: accumulate fractional source rows until one full
   output row's worth of coverage has been gathered */
141 zero_row(accum_row, src->xsize, src->channels);
142 while (fracrowtofill > 0) {
144 if (rowsread < src->ysize) {
145 IM_GLIN(src, 0, src->xsize, rowsread, in_row);
148 /* else just use the last row read */
152 if (rowsleft < fracrowtofill) {
153 IM_SUFFIX(accum_output_row)(accum_row, rowsleft, in_row,
154 src->xsize, src->channels);
155 fracrowtofill -= rowsleft;
/* remaining coverage of this source row completes the output row */
159 IM_SUFFIX(accum_output_row)(accum_row, fracrowtofill, in_row,
160 src->xsize, src->channels);
161 rowsleft -= fracrowtofill;
166 /* we've accumulated a vertically scaled row */
167 if (x_out == src->xsize) {
170 /* no need to scale, but we need to convert it */
171 if (result->channels == 2 || result->channels == 4) {
172 int alpha_chan = result->channels - 1;
173 for (x = 0; x < x_out; ++x) {
174 double alpha = accum_row[x].channel[alpha_chan] / IM_SAMPLE_MAX;
/* un-premultiply: divide accumulated color back out by alpha,
   rounding to the output sample type */
176 for (ch = 0; ch < alpha_chan; ++ch) {
177 int val = accum_row[x].channel[ch] / alpha + 0.5;
178 xscale_row[x].channel[ch] = IM_LIMIT(val);
182 /* rather than leaving any color data as whatever was
183 originally in the buffer, set it to black. This isn't
184 any more correct, but it gives us more compressible
188 for (ch = 0; ch < alpha_chan; ++ch) {
189 xscale_row[x].channel[ch] = 0;
192 xscale_row[x].channel[alpha_chan] = IM_LIMIT(accum_row[x].channel[alpha_chan]+0.5);
/* no alpha channel: just round every sample */
196 for (x = 0; x < x_out; ++x) {
197 for (ch = 0; ch < result->channels; ++ch)
198 xscale_row[x].channel[ch] = IM_LIMIT(accum_row[x].channel[ch]+0.5);
201 IM_PLIN(result, 0, x_out, y, xscale_row);
/* presumably the double-sample case writes the accumulator directly -
   TODO confirm, branch header not visible */
203 IM_PLIN(result, 0, x_out, y, accum_row);
/* width differs: mix horizontally into xscale_row, then store */
207 IM_SUFFIX(horizontal_scale)(xscale_row, x_out, accum_row,
208 src->xsize, src->channels);
209 IM_PLIN(result, 0, x_out, y, xscale_row);
/* zero_row() - set every channel of each of the width pixels in row[]
   to 0.0, resetting the floating-point accumulator before a new output
   row is mixed.  (NOTE(review): the function's trailing lines are not
   visible in this chunk.) */
221 zero_row(i_fcolor *row, int width, int channels) {
225 /* with IEEE floats we could just use memset() but that's not
226 safe in general under ANSI C.
227 memset() is slightly faster.
229 for (x = 0; x < width; ++x) {
230 for (ch = 0; ch < channels; ++ch)
231 row[x].channel[ch] = 0.0;
/* accum_output_row() (IM_SUFFIX-templated per sample type) - add
   fraction * in[x] into the floating-point accumulator row accum[].
   For images with an alpha channel (2 or 4 channels) the color samples
   are premultiplied by the pixel's alpha (last channel, scaled by
   IM_SAMPLE_MAX) before accumulation, so partially transparent pixels
   contribute proportionally; the alpha channel itself is accumulated
   unweighted.  (NOTE(review): closing lines are not visible in this
   chunk.) */
238 IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in,
239 int width, int channels) {
242 /* it's tempting to change this into a pointer iteration loop but
243 modern CPUs do the indexing as part of the instruction */
244 if (channels == 2 || channels == 4) {
245 for (x = 0; x < width; ++x) {
246 for (ch = 0; ch < channels-1; ++ch) {
247 accum[x].channel[ch] += in[x].channel[ch] * fraction * in[x].channel[channels-1] / IM_SAMPLE_MAX;
249 accum[x].channel[channels-1] += in[x].channel[channels-1] * fraction;
/* no alpha: accumulate every channel with the plain coverage weight */
253 for (x = 0; x < width; ++x) {
254 for (ch = 0; ch < channels; ++ch) {
255 accum[x].channel[ch] += in[x].channel[ch] * fraction;
/* horizontal_scale() (IM_SUFFIX-templated per sample type) - mix one
   vertically-accumulated row in[0..in_width-1] down (or up) to
   out[0..out_width-1] by fractional column coverage, rounding samples
   back to the output sample type with IM_ROUND/IM_LIMIT.  For alpha
   images the accumulated color is premultiplied, so it is divided back
   out by alpha before storing; when alpha is effectively zero the color
   is set to black instead (see RT #32324).

   NOTE(review): this chunk is missing lines and the function continues
   past the visible end - comments describe only the visible code. */
262 IM_SUFFIX(horizontal_scale)(IM_COLOR *out, int out_width,
263 i_fcolor const *in, int in_width,
265 double frac_col_to_fill, frac_col_left;
268 double x_scale = (double)out_width / in_width;
270 double accum[MAXCHANNELS] = { 0 };
272 frac_col_to_fill = 1.0;
/* walk the input columns, spilling coverage into output columns; each
   completed output column is rounded, stored, and the accumulator reset */
274 for (in_x = 0; in_x < in_width; ++in_x) {
275 frac_col_left = x_scale;
276 while (frac_col_left >= frac_col_to_fill) {
277 for (ch = 0; ch < channels; ++ch)
278 accum[ch] += frac_col_to_fill * in[in_x].channel[ch];
280 if (channels == 2 || channels == 4) {
281 int alpha_chan = channels - 1;
282 double alpha = accum[alpha_chan] / IM_SAMPLE_MAX;
/* un-premultiply the color channels by the mixed alpha */
284 for (ch = 0; ch < alpha_chan; ++ch) {
285 IM_WORK_T val = IM_ROUND(accum[ch] / alpha);
286 out[out_x].channel[ch] = IM_LIMIT(val);
/* alpha ~ 0: store black rather than stale buffer contents */
290 for (ch = 0; ch < alpha_chan; ++ch) {
291 /* See RT #32324 (and mention above) */
292 out[out_x].channel[ch] = 0;
295 out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan]));
/* no alpha channel: round and clamp each accumulated sample */
298 for (ch = 0; ch < channels; ++ch) {
299 IM_WORK_T val = IM_ROUND(accum[ch]);
300 out[out_x].channel[ch] = IM_LIMIT(val);
/* reset the accumulator for the next output column */
303 for (ch = 0; ch < channels; ++ch)
305 frac_col_left -= frac_col_to_fill;
306 frac_col_to_fill = 1.0;
/* leftover coverage from this input column starts the next output one */
310 if (frac_col_left > 0) {
311 for (ch = 0; ch < channels; ++ch) {
312 accum[ch] += frac_col_left * in[in_x].channel[ch];
314 frac_col_to_fill -= frac_col_left;
/* sanity check: rounding may leave exactly one output column unfilled */
318 if (out_x < out_width-1 || out_x > out_width) {
319 i_fatal(3, "Internal error: out_x %d out of range (width %d)", out_x, out_width);
/* top up the final column from the last input column, then store it
   with the same alpha handling as above */
322 if (out_x < out_width) {
323 for (ch = 0; ch < channels; ++ch) {
324 accum[ch] += frac_col_to_fill * in[in_width-1].channel[ch];
326 if (channels == 2 || channels == 4) {
327 int alpha_chan = channels - 1;
328 double alpha = accum[alpha_chan] / IM_SAMPLE_MAX;
330 for (ch = 0; ch < alpha_chan; ++ch) {
331 IM_WORK_T val = IM_ROUND(accum[ch] / alpha);
332 out[out_x].channel[ch] = IM_LIMIT(val);
336 for (ch = 0; ch < alpha_chan; ++ch) {
337 /* See RT #32324 (and mention above) */
338 out[out_x].channel[ch] = 0;
341 out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan]));
344 for (ch = 0; ch < channels; ++ch) {
345 IM_WORK_T val = IM_ROUND(accum[ch]);
346 out[out_x].channel[ch] = IM_LIMIT(val);