 * i_scale_mixing() is based on code contained in pnmscale.c, part of
 * the netpbm distribution.  No code was copied from pnmscale but
 * the algorithm was, and for this I thank the netpbm crew.
/* pnmscale.c - read a portable anymap and scale it
**
** Copyright (C) 1989, 1991 by Jef Poskanzer.
**
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation.  This software is provided "as is" without express or
** implied warranty.
*/
static void
zero_row(i_fcolor *row, int width, int channels);

static void
IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in,
                            int width, int channels);
static void
IM_SUFFIX(horizontal_scale)(IM_COLOR *out, int out_width,
                            i_fcolor const *in, int in_width,
                            int channels);
Returns a new image scaled to the given size.

Unlike i_scale_axis() this does a simple coverage of pixels from
source to target and doesn't resample.

Adapted from pnmscale.
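A minimal calling sketch; the 320x240 target size is only an example,
and the error handling assumes the usual Imager C API convention of
returning NULL after pushing a message onto the error stack:

  i_img *scaled = i_scale_mixing(src, 320, 240);
  if (!scaled) {
    /* an error message is available from the Imager error stack */
  }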
i_img *
i_scale_mixing(i_img *src, int x_out, int y_out) {
  i_fcolor *accum_row = NULL;
  double rowsleft, fracrowtofill;

  mm_log((1, "i_scale_mixing(src %p, x_out %d, y_out %d)\n",
          src, x_out, y_out));

    i_push_errorf(0, "output width %d invalid", x_out);

    i_push_errorf(0, "output height %d invalid", y_out);
  if (x_out == src->xsize && y_out == src->ysize) {

  y_scale = y_out / (double)src->ysize;

  result = i_sametype_chans(src, x_out, y_out, src->channels);

  accum_row_bytes = sizeof(i_fcolor) * src->xsize;
  if (accum_row_bytes / sizeof(i_fcolor) != src->xsize) {
    i_push_error(0, "integer overflow allocating accumulator row buffer");

  accum_row = mymalloc(accum_row_bytes);
  IM_COLOR *in_row = NULL;
  IM_COLOR *xscale_row = NULL;
  size_t in_row_bytes, out_row_bytes;

  in_row_bytes = sizeof(IM_COLOR) * src->xsize;
  if (in_row_bytes / sizeof(IM_COLOR) != src->xsize) {
    i_push_error(0, "integer overflow allocating input row buffer");

  out_row_bytes = sizeof(IM_COLOR) * x_out;
  if (out_row_bytes / sizeof(IM_COLOR) != x_out) {
    i_push_error(0, "integer overflow allocating output row buffer");

  in_row = mymalloc(in_row_bytes);
  xscale_row = mymalloc(out_row_bytes);
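  /* Each output row is produced in two stages: the fractional
     contributions of the source rows that cover it are mixed into
     accum_row as floating point samples (color premultiplied by alpha
     where present), then the accumulated row is scaled horizontally
     into xscale_row (or simply converted back when no horizontal
     scaling is needed) and written to the result image. */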
  for (y = 0; y < y_out; ++y) {
    if (y_out == src->ysize) {
      /* no vertical scaling, just load it */

      /* load and convert to doubles */
      IM_GLIN(src, 0, src->xsize, y, in_row);
      for (x = 0; x < src->xsize; ++x) {
        for (ch = 0; ch < src->channels; ++ch) {
          accum_row[x].channel[ch] = in_row[x].channel[ch];

      IM_GLIN(src, 0, src->xsize, y, accum_row);
      /* if there's an alpha channel, premultiply the color samples by
         alpha so that mixing weights transparent pixels correctly */
      if (src->channels == 2 || src->channels == 4) {
        for (x = 0; x < src->xsize; ++x) {
          for (ch = 0; ch < src->channels-1; ++ch) {
            accum_row[x].channel[ch] *=
              accum_row[x].channel[src->channels-1] / IM_SAMPLE_MAX;
      zero_row(accum_row, src->xsize, src->channels);
      while (fracrowtofill > 0) {

        if (rowsread < src->ysize) {
          IM_GLIN(src, 0, src->xsize, rowsread, in_row);

        /* else just use the last row read */

        if (rowsleft < fracrowtofill) {
          IM_SUFFIX(accum_output_row)(accum_row, rowsleft, in_row,
                                      src->xsize, src->channels);
          fracrowtofill -= rowsleft;

          IM_SUFFIX(accum_output_row)(accum_row, fracrowtofill, in_row,
                                      src->xsize, src->channels);
          rowsleft -= fracrowtofill;
    /* we've accumulated a vertically scaled row */
    if (x_out == src->xsize) {

      /* no need to scale, but we need to convert it */
      if (result->channels == 2 || result->channels == 4) {
        int alpha_chan = result->channels - 1;
        for (x = 0; x < x_out; ++x) {
          double alpha = accum_row[x].channel[alpha_chan] / IM_SAMPLE_MAX;

          for (ch = 0; ch < alpha_chan; ++ch) {
            int val = accum_row[x].channel[ch] / alpha + 0.5;
            xscale_row[x].channel[ch] = IM_LIMIT(val);

          xscale_row[x].channel[alpha_chan] = IM_LIMIT(accum_row[x].channel[alpha_chan]+0.5);

        for (x = 0; x < x_out; ++x) {
          for (ch = 0; ch < result->channels; ++ch)
            xscale_row[x].channel[ch] = IM_LIMIT(accum_row[x].channel[ch]+0.5);

      IM_PLIN(result, 0, x_out, y, xscale_row);

      IM_PLIN(result, 0, x_out, y, accum_row);
      IM_SUFFIX(horizontal_scale)(xscale_row, x_out, accum_row,
                                  src->xsize, src->channels);
      IM_PLIN(result, 0, x_out, y, xscale_row);
static void
zero_row(i_fcolor *row, int width, int channels) {
  /* with IEEE floats we could just use memset() but that's not
     safe in general under ANSI C.
     memset() is slightly faster.
  */
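  /* For reference, the memset() form weighed above would look like the
     following; it is only safe where 0.0 is represented as all-zero
     bits, as it is for IEEE 754 floats:
       memset(row, 0, sizeof(i_fcolor) * (size_t)width);
  */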
  for (x = 0; x < width; ++x) {
    for (ch = 0; ch < channels; ++ch)
      row[x].channel[ch] = 0.0;
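/* IM_SUFFIX(accum_output_row) - add the given fraction of a source
   row's coverage into the floating point accumulator row, with the
   color samples premultiplied by alpha when the image has an alpha
   channel. */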
static void
IM_SUFFIX(accum_output_row)(i_fcolor *accum, double fraction, IM_COLOR const *in,
                            int width, int channels) {
  /* it's tempting to change this into a pointer iteration loop but
     modern CPUs do the indexing as part of the instruction */
  if (channels == 2 || channels == 4) {
    for (x = 0; x < width; ++x) {
      for (ch = 0; ch < channels-1; ++ch) {
        accum[x].channel[ch] += in[x].channel[ch] * fraction * in[x].channel[channels-1] / IM_SAMPLE_MAX;

      accum[x].channel[channels-1] += in[x].channel[channels-1] * fraction;

    for (x = 0; x < width; ++x) {
      for (ch = 0; ch < channels; ++ch) {
        accum[x].channel[ch] += in[x].channel[ch] * fraction;
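/* IM_SUFFIX(horizontal_scale) - distribute each input column's coverage
   (x_scale output columns per input column) across the output columns
   it overlaps.  frac_col_to_fill tracks how much of the current output
   column remains uncovered; whenever the input's remaining coverage
   (frac_col_left) can fill it, the accumulated samples are rounded
   (un-premultiplying alpha if present), stored, and the accumulator is
   reset for the next output column. */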
static void
IM_SUFFIX(horizontal_scale)(IM_COLOR *out, int out_width,
                            i_fcolor const *in, int in_width,
                            int channels) {
  double frac_col_to_fill, frac_col_left;

  double x_scale = (double)out_width / in_width;

  double accum[MAXCHANNELS] = { 0 };

  frac_col_to_fill = 1.0;

  for (in_x = 0; in_x < in_width; ++in_x) {
    frac_col_left = x_scale;
    while (frac_col_left >= frac_col_to_fill) {
      for (ch = 0; ch < channels; ++ch)
        accum[ch] += frac_col_to_fill * in[in_x].channel[ch];

      if (channels == 2 || channels == 4) {
        int alpha_chan = channels - 1;
        double alpha = accum[alpha_chan] / IM_SAMPLE_MAX;

        for (ch = 0; ch < alpha_chan; ++ch) {
          IM_WORK_T val = IM_ROUND(accum[ch] / alpha);
          out[out_x].channel[ch] = IM_LIMIT(val);

        out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan]));
        for (ch = 0; ch < channels; ++ch) {
          IM_WORK_T val = IM_ROUND(accum[ch]);
          out[out_x].channel[ch] = IM_LIMIT(val);

      for (ch = 0; ch < channels; ++ch)
        accum[ch] = 0;
      frac_col_left -= frac_col_to_fill;
      frac_col_to_fill = 1.0;
    if (frac_col_left > 0) {
      for (ch = 0; ch < channels; ++ch) {
        accum[ch] += frac_col_left * in[in_x].channel[ch];

      frac_col_to_fill -= frac_col_left;
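  /* After the loop above out_x should be either out_width (every output
     column has been flushed) or out_width-1 (one partially covered
     column remains and is flushed below). */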
  if (out_x < out_width-1 || out_x > out_width) {
    i_fatal(3, "Internal error: out_x %d out of range (width %d)", out_x, out_width);
  if (out_x < out_width) {
    for (ch = 0; ch < channels; ++ch) {
      accum[ch] += frac_col_to_fill * in[in_width-1].channel[ch];

    if (channels == 2 || channels == 4) {
      int alpha_chan = channels - 1;
      double alpha = accum[alpha_chan] / IM_SAMPLE_MAX;

      for (ch = 0; ch < alpha_chan; ++ch) {
        IM_WORK_T val = IM_ROUND(accum[ch] / alpha);
        out[out_x].channel[ch] = IM_LIMIT(val);

      out[out_x].channel[alpha_chan] = IM_LIMIT(IM_ROUND(accum[alpha_chan]));

      for (ch = 0; ch < channels; ++ch) {
        IM_WORK_T val = IM_ROUND(accum[ch]);
        out[out_x].channel[ch] = IM_LIMIT(val);