/shark/trunk/ports/png/pngerror.c |
/* pngerror.c - stub functions for i/o and memory allocation |
* |
* libpng 1.2.5 - October 3, 2002 |
* For conditions of distribution and use, see copyright notice in png.h |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) |
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) |
* |
* This file provides a location for all error handling. Users who |
* need special error handling are expected to write replacement functions |
* and use png_set_error_fn() to use those functions. See the instructions |
* at each function. |
*/ |
#define PNG_INTERNAL |
#include "png.h" |
static void /* PRIVATE */ |
png_default_error PNGARG((png_structp png_ptr, |
png_const_charp error_message)); |
static void /* PRIVATE */ |
png_default_warning PNGARG((png_structp png_ptr, |
png_const_charp warning_message)); |
/* This function is called whenever there is a fatal error. This function |
* should not be changed. If there is a need to handle errors differently, |
* you should supply a replacement error function and use png_set_error_fn() |
* to replace the error function at run-time. |
*/ |
void PNGAPI |
png_error(png_structp png_ptr, png_const_charp error_message) |
{ |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
char msg[16]; |
if (png_ptr->flags&(PNG_FLAG_STRIP_ERROR_NUMBERS|PNG_FLAG_STRIP_ERROR_TEXT)) |
{ |
int offset = 0; |
if (*error_message == '#') |
{ |
for (offset=1; offset<15; offset++) |
if (*(error_message+offset) == ' ') |
break; |
if (png_ptr->flags&PNG_FLAG_STRIP_ERROR_TEXT) |
{ |
int i; |
for (i=0; i<offset-1; i++) |
msg[i]=error_message[i+1]; |
msg[i]='\0'; |
error_message=msg; |
} |
else |
error_message+=offset; |
} |
else |
{ |
if (png_ptr->flags&PNG_FLAG_STRIP_ERROR_TEXT) |
{ |
msg[0]='0'; |
msg[1]='\0'; |
error_message=msg; |
} |
} |
} |
#endif |
if (png_ptr->error_fn != NULL) |
(*(png_ptr->error_fn))(png_ptr, error_message); |
/* if the following returns or doesn't exist, use the default function, |
which will not return */ |
png_default_error(png_ptr, error_message); |
} |
/* This function is called whenever there is a non-fatal error. This function |
* should not be changed. If there is a need to handle warnings differently, |
* you should supply a replacement warning function and use |
* png_set_error_fn() to replace the warning function at run-time. |
*/ |
void PNGAPI |
png_warning(png_structp png_ptr, png_const_charp warning_message) |
{ |
int offset = 0; |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
if (png_ptr->flags&(PNG_FLAG_STRIP_ERROR_NUMBERS|PNG_FLAG_STRIP_ERROR_TEXT)) |
#endif |
{ |
if (*warning_message == '#') |
{ |
for (offset=1; offset<15; offset++) |
if (*(warning_message+offset) == ' ') |
break; |
} |
} |
if (png_ptr->warning_fn != NULL) |
(*(png_ptr->warning_fn))(png_ptr, |
(png_const_charp)(warning_message+offset)); |
else |
png_default_warning(png_ptr, (png_const_charp)(warning_message+offset)); |
} |
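/* Editorial sketch, not part of libpng: when PNG_ERROR_NUMBERS_SUPPORTED is
 * defined, a message may carry a numeric prefix of the form "#NNN text"
 * (a '#', a short decimal code, then a space), which the code above strips or
 * keeps depending on the PNG_FLAG_STRIP_ERROR_* flags.  A user handler
 * installed with png_set_error_fn() can split such a message itself; the
 * hypothetical helper below shows one way, using only standard C.
 */
#include <stdlib.h>  /* for strtol; assumed available in this environment */
static png_const_charp
example_split_numbered_message(png_const_charp msg, long *number)
{
   *number = 0;
   if (msg != NULL && msg[0] == '#')
   {
      char *end;
      long n = strtol(msg + 1, &end, 10);
      if (end != msg + 1 && *end == ' ')
      {
         *number = n;
         return (png_const_charp)(end + 1);  /* text after the prefix */
      }
   }
   return msg;  /* no numeric prefix; return the message unchanged */
}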
/* These utilities are used internally to build an error message that relates |
* to the current chunk. The chunk name comes from png_ptr->chunk_name, |
* this is used to prefix the message. The message is limited in length |
* to 63 bytes, the name characters are output as hex digits wrapped in [] |
* if the character is invalid. |
*/ |
/* true if c is not an ASCII letter (A-Z is 65-90, a-z is 97-122) */
#define isnonalpha(c) ((c) < 65 || (c) > 122 || ((c) > 90 && (c) < 97))
static PNG_CONST char png_digit[16] = { |
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', |
'F' }; |
static void /* PRIVATE */ |
png_format_buffer(png_structp png_ptr, png_charp buffer, png_const_charp |
error_message) |
{ |
int iout = 0, iin = 0; |
while (iin < 4) |
{ |
int c = png_ptr->chunk_name[iin++]; |
if (isnonalpha(c)) |
{ |
buffer[iout++] = '['; |
buffer[iout++] = png_digit[(c & 0xf0) >> 4]; |
buffer[iout++] = png_digit[c & 0x0f]; |
buffer[iout++] = ']'; |
} |
else |
{ |
buffer[iout++] = (png_byte)c; |
} |
} |
if (error_message == NULL) |
buffer[iout] = 0; |
else |
{ |
buffer[iout++] = ':'; |
buffer[iout++] = ' '; |
/* copy at most 63 bytes of the message without reading past its end */
{
int im;
for (im = 0; im < 63 && error_message[im] != '\0'; im++)
buffer[iout + im] = error_message[im];
buffer[iout + im] = '\0';
}
} |
} |
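/* Editorial note, not part of libpng: in the worst case every one of the four
 * chunk-name bytes is invalid and expands to "[XX]" (4 characters), giving
 * 16 bytes, plus ": " for 18, plus at most 63 message bytes and a terminating
 * NUL for another 64 -- which is why png_chunk_error() and png_chunk_warning()
 * below declare char msg[18+64].  For example, a chunk whose name bytes are
 * 'I', 'D', 0x07, 'T' and the message "invalid length" would be formatted as
 * "ID[07]T: invalid length".
 */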
void PNGAPI |
png_chunk_error(png_structp png_ptr, png_const_charp error_message) |
{ |
char msg[18+64]; |
png_format_buffer(png_ptr, msg, error_message); |
png_error(png_ptr, msg); |
} |
void PNGAPI |
png_chunk_warning(png_structp png_ptr, png_const_charp warning_message) |
{ |
char msg[18+64]; |
png_format_buffer(png_ptr, msg, warning_message); |
png_warning(png_ptr, msg); |
} |
/* This is the default error handling function. Note that replacements for |
* this function MUST NOT RETURN, or the program will likely crash. This |
* function is used by default, or if the program supplies NULL for the |
* error function pointer in png_set_error_fn(). |
*/ |
static void /* PRIVATE */ |
png_default_error(png_structp png_ptr, png_const_charp error_message) |
{ |
#ifndef PNG_NO_CONSOLE_IO |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
if (*error_message == '#') |
{ |
int offset; |
char error_number[16]; |
for (offset=0; offset<15; offset++) |
{ |
error_number[offset] = *(error_message+offset+1); |
if (*(error_message+offset) == ' ') |
break; |
} |
if((offset > 1) && (offset < 15)) |
{ |
error_number[offset-1]='\0'; |
cprintf("libpng error no. %s: %s\n", error_number, |
error_message+offset); |
} |
else |
cprintf("libpng error: %s, offset=%d\n", error_message,offset); |
} |
else |
#endif |
cprintf("libpng error: %s\n", error_message); |
#else |
if (error_message) |
/* make compiler happy */ ; |
#endif |
#ifdef PNG_SETJMP_SUPPORTED |
# ifdef USE_FAR_KEYWORD |
{ |
jmp_buf jmpbuf; |
png_memcpy(jmpbuf,png_ptr->jmpbuf,sizeof(jmp_buf)); |
longjmp(jmpbuf, 1); |
} |
# else |
longjmp(png_ptr->jmpbuf, 1); |
# endif |
#else |
if (png_ptr) |
/* make compiler happy */ ; |
PNG_ABORT(); |
#endif |
} |
/* This function is called when there is a warning, but the library thinks |
* it can continue anyway. Replacement functions don't have to do anything |
* here if you don't want them to. In the default configuration, png_ptr is |
* not used, but it is passed in case it may be useful. |
*/ |
static void /* PRIVATE */ |
png_default_warning(png_structp png_ptr, png_const_charp warning_message) |
{ |
#ifndef PNG_NO_CONSOLE_IO |
# ifdef PNG_ERROR_NUMBERS_SUPPORTED |
if (*warning_message == '#') |
{ |
int offset; |
char warning_number[16]; |
for (offset=0; offset<15; offset++) |
{ |
warning_number[offset]=*(warning_message+offset+1); |
if (*(warning_message+offset) == ' ') |
break; |
} |
if((offset > 1) && (offset < 15)) |
{ |
warning_number[offset-1]='\0'; |
cprintf("libpng warning no. %s: %s\n", warning_number, |
warning_message+offset); |
} |
else |
cprintf("libpng warning: %s\n", warning_message); |
} |
else |
# endif |
cprintf("libpng warning: %s\n", warning_message); |
#else |
if (warning_message) |
/* appease compiler */ ; |
#endif |
if (png_ptr) |
return; |
} |
/* This function is called when the application wants to use another method |
* of handling errors and warnings. Note that the error function MUST NOT |
* return to the calling routine or serious problems will occur. The return |
* method used in the default routine calls longjmp(png_ptr->jmpbuf, 1) |
*/ |
void PNGAPI |
png_set_error_fn(png_structp png_ptr, png_voidp error_ptr, |
png_error_ptr error_fn, png_error_ptr warning_fn) |
{ |
png_ptr->error_ptr = error_ptr; |
png_ptr->error_fn = error_fn; |
png_ptr->warning_fn = warning_fn; |
} |
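/* Editorial usage sketch, not part of libpng: an application typically installs
 * replacement handlers right after creating the read or write struct (assuming
 * PNG_SETJMP_SUPPORTED and console I/O).  The example_* names below are
 * hypothetical.  An error handler must not return, so this one jumps back to
 * the caller's setjmp(png_jmpbuf(png_ptr)) point.
 */
static void
example_error_fn(png_structp png_ptr, png_const_charp msg)
{
   fprintf(stderr, "png error: %s\n", msg);
   longjmp(png_jmpbuf(png_ptr), 1);  /* never return control to libpng */
}
static void
example_warning_fn(png_structp png_ptr, png_const_charp msg)
{
   fprintf(stderr, "png warning: %s\n", msg);  /* returning is allowed here */
   if (png_ptr)
      /* make compiler happy */ ;
}
static void
example_install_handlers(png_structp png_ptr, png_voidp my_context)
{
   /* my_context is later retrievable inside the handlers via png_get_error_ptr() */
   png_set_error_fn(png_ptr, my_context, example_error_fn, example_warning_fn);
}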
/* This function returns a pointer to the error_ptr associated with the user |
* functions. The application should free any memory associated with this |
* pointer before png_write_destroy and png_read_destroy are called. |
*/ |
png_voidp PNGAPI |
png_get_error_ptr(png_structp png_ptr) |
{ |
return ((png_voidp)png_ptr->error_ptr); |
} |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
void PNGAPI |
png_set_strip_error_numbers(png_structp png_ptr, png_uint_32 strip_mode) |
{ |
if(png_ptr != NULL) |
{ |
png_ptr->flags &= |
((~(PNG_FLAG_STRIP_ERROR_NUMBERS|PNG_FLAG_STRIP_ERROR_TEXT))&strip_mode); |
} |
} |
#endif |
/shark/trunk/ports/png/infcodes.h |
/* infcodes.h -- header to use infcodes.c |
* Copyright (C) 1995-2002 Mark Adler |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* WARNING: this file should *not* be used by applications. It is |
part of the implementation of the compression library and is |
subject to change. Applications should only use zlib.h. |
*/ |
struct inflate_codes_state; |
typedef struct inflate_codes_state FAR inflate_codes_statef; |
extern inflate_codes_statef *inflate_codes_new OF(( |
uInt, uInt, |
inflate_huft *, inflate_huft *, |
z_streamp )); |
extern int inflate_codes OF(( |
inflate_blocks_statef *, |
z_streamp , |
int)); |
extern void inflate_codes_free OF(( |
inflate_codes_statef *, |
z_streamp )); |
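/* Editorial note, not part of zlib: this module is driven by inflate_blocks()
   in infblock.c.  After the literal/length and distance Huffman tables for a
   compressed block have been built (bl and bd are the root table bit counts,
   tl and td the tables from inflate_trees_dynamic() or inflate_trees_fixed()),
   the block decoder is created with inflate_codes_new(bl, bd, tl, td, z),
   run with inflate_codes(s, z, r) until the block is exhausted, and released
   with inflate_codes_free(c, z).
 */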
/shark/trunk/ports/png/pngwtran.c |
/* pngwtran.c - transforms the data in a row for PNG writers |
* |
* libpng 1.2.5 - October 3, 2002 |
* For conditions of distribution and use, see copyright notice in png.h |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) |
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) |
*/ |
#define PNG_INTERNAL |
#include "png.h" |
#ifdef PNG_WRITE_SUPPORTED |
/* Transform the data according to the user's wishes. The order of |
* transformations is significant. |
*/ |
void /* PRIVATE */ |
png_do_write_transformations(png_structp png_ptr) |
{ |
png_debug(1, "in png_do_write_transformations\n"); |
if (png_ptr == NULL) |
return; |
#if defined(PNG_WRITE_USER_TRANSFORM_SUPPORTED) |
if (png_ptr->transformations & PNG_USER_TRANSFORM) |
if(png_ptr->write_user_transform_fn != NULL) |
(*(png_ptr->write_user_transform_fn)) /* user write transform function */ |
(png_ptr, /* png_ptr */ |
&(png_ptr->row_info), /* row_info: */ |
/* png_uint_32 width; width of row */ |
/* png_uint_32 rowbytes; number of bytes in row */ |
/* png_byte color_type; color type of pixels */ |
/* png_byte bit_depth; bit depth of samples */ |
/* png_byte channels; number of channels (1-4) */ |
/* png_byte pixel_depth; bits per pixel (depth*channels) */ |
png_ptr->row_buf + 1); /* start of pixel data for row */ |
#endif |
#if defined(PNG_WRITE_FILLER_SUPPORTED) |
if (png_ptr->transformations & PNG_FILLER) |
png_do_strip_filler(&(png_ptr->row_info), png_ptr->row_buf + 1, |
png_ptr->flags); |
#endif |
#if defined(PNG_WRITE_PACKSWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_PACKSWAP) |
png_do_packswap(&(png_ptr->row_info), png_ptr->row_buf + 1); |
#endif |
#if defined(PNG_WRITE_PACK_SUPPORTED) |
if (png_ptr->transformations & PNG_PACK) |
png_do_pack(&(png_ptr->row_info), png_ptr->row_buf + 1, |
(png_uint_32)png_ptr->bit_depth); |
#endif |
#if defined(PNG_WRITE_SWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_SWAP_BYTES) |
png_do_swap(&(png_ptr->row_info), png_ptr->row_buf + 1); |
#endif |
#if defined(PNG_WRITE_SHIFT_SUPPORTED) |
if (png_ptr->transformations & PNG_SHIFT) |
png_do_shift(&(png_ptr->row_info), png_ptr->row_buf + 1, |
&(png_ptr->shift)); |
#endif |
#if defined(PNG_WRITE_INVERT_ALPHA_SUPPORTED) |
if (png_ptr->transformations & PNG_INVERT_ALPHA) |
png_do_write_invert_alpha(&(png_ptr->row_info), png_ptr->row_buf + 1); |
#endif |
#if defined(PNG_WRITE_SWAP_ALPHA_SUPPORTED) |
if (png_ptr->transformations & PNG_SWAP_ALPHA) |
png_do_write_swap_alpha(&(png_ptr->row_info), png_ptr->row_buf + 1); |
#endif |
#if defined(PNG_WRITE_BGR_SUPPORTED) |
if (png_ptr->transformations & PNG_BGR) |
png_do_bgr(&(png_ptr->row_info), png_ptr->row_buf + 1); |
#endif |
#if defined(PNG_WRITE_INVERT_SUPPORTED) |
if (png_ptr->transformations & PNG_INVERT_MONO) |
png_do_invert(&(png_ptr->row_info), png_ptr->row_buf + 1); |
#endif |
} |
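/* Editorial usage sketch, not part of libpng: the transformations tested above
 * are requested by the application after png_write_info() and before the image
 * rows are written.  A hypothetical writer whose in-memory rows are BGR, padded
 * with a filler byte, with 16-bit samples stored LSB-first, would ask for the
 * following (assuming the matching PNG_WRITE_*_SUPPORTED features are enabled):
 */
static void
example_request_write_transforms(png_structp png_ptr)
{
   png_set_bgr(png_ptr);                         /* rows are BGR in memory */
   png_set_filler(png_ptr, 0, PNG_FILLER_AFTER); /* drop the trailing pad byte */
   png_set_swap(png_ptr);                        /* 16-bit samples are LSB-first */
}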
#if defined(PNG_WRITE_PACK_SUPPORTED) |
/* Pack pixels into bytes. Pass the true bit depth in bit_depth. The |
* row_info bit depth should be 8 (one pixel per byte). The channels |
* should be 1 (this only happens on grayscale and paletted images). |
*/ |
void /* PRIVATE */ |
png_do_pack(png_row_infop row_info, png_bytep row, png_uint_32 bit_depth) |
{ |
png_debug(1, "in png_do_pack\n"); |
if (row_info->bit_depth == 8 && |
#if defined(PNG_USELESS_TESTS_SUPPORTED) |
row != NULL && row_info != NULL && |
#endif |
row_info->channels == 1) |
{ |
switch ((int)bit_depth) |
{ |
case 1: |
{ |
png_bytep sp, dp; |
int mask, v; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
sp = row; |
dp = row; |
mask = 0x80; |
v = 0; |
for (i = 0; i < row_width; i++) |
{ |
if (*sp != 0) |
v |= mask; |
sp++; |
if (mask > 1) |
mask >>= 1; |
else |
{ |
mask = 0x80; |
*dp = (png_byte)v; |
dp++; |
v = 0; |
} |
} |
if (mask != 0x80) |
*dp = (png_byte)v; |
break; |
} |
case 2: |
{ |
png_bytep sp, dp; |
int shift, v; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
sp = row; |
dp = row; |
shift = 6; |
v = 0; |
for (i = 0; i < row_width; i++) |
{ |
png_byte value; |
value = (png_byte)(*sp & 0x03); |
v |= (value << shift); |
if (shift == 0) |
{ |
shift = 6; |
*dp = (png_byte)v; |
dp++; |
v = 0; |
} |
else |
shift -= 2; |
sp++; |
} |
if (shift != 6) |
*dp = (png_byte)v; |
break; |
} |
case 4: |
{ |
png_bytep sp, dp; |
int shift, v; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
sp = row; |
dp = row; |
shift = 4; |
v = 0; |
for (i = 0; i < row_width; i++) |
{ |
png_byte value; |
value = (png_byte)(*sp & 0x0f); |
v |= (value << shift); |
if (shift == 0) |
{ |
shift = 4; |
*dp = (png_byte)v; |
dp++; |
v = 0; |
} |
else |
shift -= 4; |
sp++; |
} |
if (shift != 4) |
*dp = (png_byte)v; |
break; |
} |
} |
row_info->bit_depth = (png_byte)bit_depth; |
row_info->pixel_depth = (png_byte)(bit_depth * row_info->channels); |
row_info->rowbytes = |
((row_info->width * row_info->pixel_depth + 7) >> 3); |
} |
} |
#endif |
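/* Editorial sketch, not part of libpng: the bit_depth == 1 case above packs
 * eight one-byte samples (each 0 or 1) into a single byte, most significant
 * bit first.  A minimal standalone equivalent, for illustration only:
 */
static png_byte
example_pack_eight_1bit_samples(png_bytep samples)
{
   int i;
   png_byte packed = 0;
   for (i = 0; i < 8; i++)
   {
      if (samples[i] != 0)
         packed |= (png_byte)(0x80 >> i);  /* pixel 0 lands in bit 7 */
   }
   return packed;  /* e.g. {1,0,1,1,0,0,0,1} packs to 0xB1 */
}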
#if defined(PNG_WRITE_SHIFT_SUPPORTED) |
/* Shift pixel values to take advantage of whole range. Pass the |
* true number of bits in bit_depth. The row should be packed |
* according to row_info->bit_depth. Thus, if you had a row of |
* bit depth 4, but the pixels only had values from 0 to 7, you |
* would pass 3 as bit_depth, and this routine would translate the |
* data to 0 to 15. |
*/ |
void /* PRIVATE */ |
png_do_shift(png_row_infop row_info, png_bytep row, png_color_8p bit_depth) |
{ |
png_debug(1, "in png_do_shift\n"); |
#if defined(PNG_USELESS_TESTS_SUPPORTED) |
if (row != NULL && row_info != NULL && |
#else |
if ( |
#endif |
row_info->color_type != PNG_COLOR_TYPE_PALETTE) |
{ |
int shift_start[4], shift_dec[4]; |
int channels = 0; |
if (row_info->color_type & PNG_COLOR_MASK_COLOR) |
{ |
shift_start[channels] = row_info->bit_depth - bit_depth->red; |
shift_dec[channels] = bit_depth->red; |
channels++; |
shift_start[channels] = row_info->bit_depth - bit_depth->green; |
shift_dec[channels] = bit_depth->green; |
channels++; |
shift_start[channels] = row_info->bit_depth - bit_depth->blue; |
shift_dec[channels] = bit_depth->blue; |
channels++; |
} |
else |
{ |
shift_start[channels] = row_info->bit_depth - bit_depth->gray; |
shift_dec[channels] = bit_depth->gray; |
channels++; |
} |
if (row_info->color_type & PNG_COLOR_MASK_ALPHA) |
{ |
shift_start[channels] = row_info->bit_depth - bit_depth->alpha; |
shift_dec[channels] = bit_depth->alpha; |
channels++; |
} |
/* with low row depths, could only be grayscale, so one channel */ |
if (row_info->bit_depth < 8) |
{ |
png_bytep bp = row; |
png_uint_32 i; |
png_byte mask; |
png_uint_32 row_bytes = row_info->rowbytes; |
if (bit_depth->gray == 1 && row_info->bit_depth == 2) |
mask = 0x55; |
else if (row_info->bit_depth == 4 && bit_depth->gray == 3) |
mask = 0x11; |
else |
mask = 0xff; |
for (i = 0; i < row_bytes; i++, bp++) |
{ |
png_uint_16 v; |
int j; |
v = *bp; |
*bp = 0; |
for (j = shift_start[0]; j > -shift_dec[0]; j -= shift_dec[0]) |
{ |
if (j > 0) |
*bp |= (png_byte)((v << j) & 0xff); |
else |
*bp |= (png_byte)((v >> (-j)) & mask); |
} |
} |
} |
else if (row_info->bit_depth == 8) |
{ |
png_bytep bp = row; |
png_uint_32 i; |
png_uint_32 istop = channels * row_info->width; |
for (i = 0; i < istop; i++, bp++) |
{ |
png_uint_16 v; |
int j; |
int c = (int)(i%channels); |
v = *bp; |
*bp = 0; |
for (j = shift_start[c]; j > -shift_dec[c]; j -= shift_dec[c]) |
{ |
if (j > 0) |
*bp |= (png_byte)((v << j) & 0xff); |
else |
*bp |= (png_byte)((v >> (-j)) & 0xff); |
} |
} |
} |
else |
{ |
png_bytep bp; |
png_uint_32 i; |
png_uint_32 istop = channels * row_info->width; |
for (bp = row, i = 0; i < istop; i++) |
{ |
int c = (int)(i%channels); |
png_uint_16 value, v; |
int j; |
v = (png_uint_16)(((png_uint_16)(*bp) << 8) + *(bp + 1)); |
value = 0; |
for (j = shift_start[c]; j > -shift_dec[c]; j -= shift_dec[c]) |
{ |
if (j > 0) |
value |= (png_uint_16)((v << j) & (png_uint_16)0xffff); |
else |
value |= (png_uint_16)((v >> (-j)) & (png_uint_16)0xffff); |
} |
*bp++ = (png_byte)(value >> 8); |
*bp++ = (png_byte)(value & 0xff); |
} |
} |
} |
} |
#endif |
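/* Editorial sketch, not part of libpng: for an 8-bit sample that only uses its
 * low `sig` bits, the loops above replicate those bits across the whole byte.
 * With sig = 3, shift_start = 5 and shift_dec = 3, so the output is
 * (v << 5) | (v << 2) | (v >> 1); e.g. v = 7 becomes 0xE0 | 0x1C | 0x03 = 0xFF.
 * A standalone equivalent of the 8-bit case, for illustration only:
 */
static png_byte
example_scale_sig_bits_to_8(png_byte v, int sig)
{
   int j;
   png_byte out = 0;
   for (j = 8 - sig; j > -sig; j -= sig)
   {
      if (j > 0)
         out |= (png_byte)((v << j) & 0xff);
      else
         out |= (png_byte)(v >> (-j));
   }
   return out;
}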
#if defined(PNG_WRITE_SWAP_ALPHA_SUPPORTED) |
void /* PRIVATE */ |
png_do_write_swap_alpha(png_row_infop row_info, png_bytep row) |
{ |
png_debug(1, "in png_do_write_swap_alpha\n"); |
#if defined(PNG_USELESS_TESTS_SUPPORTED) |
if (row != NULL && row_info != NULL) |
#endif |
{ |
if (row_info->color_type == PNG_COLOR_TYPE_RGB_ALPHA) |
{ |
/* This converts from ARGB to RGBA */ |
if (row_info->bit_depth == 8) |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
png_byte save = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = save; |
} |
} |
/* This converts from AARRGGBB to RRGGBBAA */ |
else |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
png_byte save[2]; |
save[0] = *(sp++); |
save[1] = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = save[0]; |
*(dp++) = save[1]; |
} |
} |
} |
else if (row_info->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) |
{ |
/* This converts from AG to GA */ |
if (row_info->bit_depth == 8) |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
png_byte save = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = save; |
} |
} |
/* This converts from AAGG to GGAA */ |
else |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
png_byte save[2]; |
save[0] = *(sp++); |
save[1] = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = save[0]; |
*(dp++) = save[1]; |
} |
} |
} |
} |
} |
#endif |
#if defined(PNG_WRITE_INVERT_ALPHA_SUPPORTED) |
void /* PRIVATE */ |
png_do_write_invert_alpha(png_row_infop row_info, png_bytep row) |
{ |
png_debug(1, "in png_do_write_invert_alpha\n"); |
#if defined(PNG_USELESS_TESTS_SUPPORTED) |
if (row != NULL && row_info != NULL) |
#endif |
{ |
if (row_info->color_type == PNG_COLOR_TYPE_RGB_ALPHA) |
{ |
/* This inverts the alpha channel in RGBA */ |
if (row_info->bit_depth == 8) |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = (png_byte)(255 - *(sp++)); |
} |
} |
/* This inverts the alpha channel in RRGGBBAA */ |
else |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = (png_byte)(255 - *(sp++)); |
*(dp++) = (png_byte)(255 - *(sp++)); |
} |
} |
} |
else if (row_info->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) |
{ |
/* This inverts the alpha channel in GA */ |
if (row_info->bit_depth == 8) |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
*(dp++) = *(sp++); |
*(dp++) = (png_byte)(255 - *(sp++)); |
} |
} |
/* This inverts the alpha channel in GGAA */ |
else |
{ |
png_bytep sp, dp; |
png_uint_32 i; |
png_uint_32 row_width = row_info->width; |
for (i = 0, sp = dp = row; i < row_width; i++) |
{ |
*(dp++) = *(sp++); |
*(dp++) = *(sp++); |
*(dp++) = (png_byte)(255 - *(sp++)); |
*(dp++) = (png_byte)(255 - *(sp++)); |
} |
} |
} |
} |
} |
#endif |
#if defined(PNG_MNG_FEATURES_SUPPORTED) |
/* applies intrapixel differencing (MNG filter method 64); the read side undoes it */
void /* PRIVATE */ |
png_do_write_intrapixel(png_row_infop row_info, png_bytep row) |
{ |
png_debug(1, "in png_do_write_intrapixel\n"); |
if ( |
#if defined(PNG_USELESS_TESTS_SUPPORTED) |
row != NULL && row_info != NULL && |
#endif |
(row_info->color_type & PNG_COLOR_MASK_COLOR)) |
{ |
int bytes_per_pixel; |
png_uint_32 row_width = row_info->width; |
if (row_info->bit_depth == 8) |
{ |
png_bytep rp; |
png_uint_32 i; |
if (row_info->color_type == PNG_COLOR_TYPE_RGB) |
bytes_per_pixel = 3; |
else if (row_info->color_type == PNG_COLOR_TYPE_RGB_ALPHA) |
bytes_per_pixel = 4; |
else |
return; |
for (i = 0, rp = row; i < row_width; i++, rp += bytes_per_pixel) |
{ |
*(rp) = (png_byte)((*rp - *(rp+1))&0xff); |
*(rp+2) = (png_byte)((*(rp+2) - *(rp+1))&0xff); |
} |
} |
else if (row_info->bit_depth == 16) |
{ |
png_bytep rp; |
png_uint_32 i; |
if (row_info->color_type == PNG_COLOR_TYPE_RGB) |
bytes_per_pixel = 6; |
else if (row_info->color_type == PNG_COLOR_TYPE_RGB_ALPHA) |
bytes_per_pixel = 8; |
else |
return; |
for (i = 0, rp = row; i < row_width; i++, rp += bytes_per_pixel) |
{ |
png_uint_32 s0=*(rp )<<8 | *(rp+1); |
png_uint_32 s1=*(rp+2)<<8 | *(rp+3); |
png_uint_32 s2=*(rp+4)<<8 | *(rp+5); |
png_uint_32 red=(s0-s1)&0xffff; |
png_uint_32 blue=(s2-s1)&0xffff; |
*(rp ) = (png_byte)((red>>8)&0xff); |
*(rp+1) = (png_byte)(red&0xff); |
*(rp+4) = (png_byte)((blue>>8)&0xff); |
*(rp+5) = (png_byte)(blue&0xff); |
} |
} |
} |
} |
#endif /* PNG_MNG_FEATURES_SUPPORTED */ |
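/* Editorial note, not part of libpng: MNG filter method 64 stores red and blue
 * as differences from green.  For an 8-bit pixel (R,G,B) = (200, 60, 20) the
 * loop above writes ((200-60) & 0xff, 60, (20-60) & 0xff) = (140, 60, 216);
 * the reader adds green back modulo 256 to recover (200, 60, 20).
 */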
#endif /* PNG_WRITE_SUPPORTED */ |
/shark/trunk/ports/png/inffixed.h |
/* inffixed.h -- table for decoding fixed codes |
* Generated automatically by the maketree.c program |
*/ |
/* WARNING: this file should *not* be used by applications. It is |
part of the implementation of the compression library and is |
subject to change. Applications should only use zlib.h. |
*/ |
local uInt fixed_bl = 9; |
local uInt fixed_bd = 5; |
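/* Editorial note, not part of zlib: each entry below is an inflate_huft as
   declared in inftrees.h, so an initializer {{{Exop,Bits}},base} fills:

     struct inflate_huft_s {
       union {
         struct {
           Byte Exop;     number of extra bits, or an operation flag
           Byte Bits;     number of bits in this code or subcode
         } what;
         uInt pad;        pads the structure to a power of two
       } word;
       uInt base;         literal, length base, distance base, or table offset
     };

   For example, {{{0,8}},80} is an 8-bit literal code for byte value 80, and
   {{{96,7}},256} is the 7-bit end-of-block code (symbol 256).
 */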
local inflate_huft fixed_tl[] = { |
{{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115}, |
{{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},192}, |
{{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},160}, |
{{{0,8}},0}, {{{0,8}},128}, {{{0,8}},64}, {{{0,9}},224}, |
{{{80,7}},6}, {{{0,8}},88}, {{{0,8}},24}, {{{0,9}},144}, |
{{{83,7}},59}, {{{0,8}},120}, {{{0,8}},56}, {{{0,9}},208}, |
{{{81,7}},17}, {{{0,8}},104}, {{{0,8}},40}, {{{0,9}},176}, |
{{{0,8}},8}, {{{0,8}},136}, {{{0,8}},72}, {{{0,9}},240}, |
{{{80,7}},4}, {{{0,8}},84}, {{{0,8}},20}, {{{85,8}},227}, |
{{{83,7}},43}, {{{0,8}},116}, {{{0,8}},52}, {{{0,9}},200}, |
{{{81,7}},13}, {{{0,8}},100}, {{{0,8}},36}, {{{0,9}},168}, |
{{{0,8}},4}, {{{0,8}},132}, {{{0,8}},68}, {{{0,9}},232}, |
{{{80,7}},8}, {{{0,8}},92}, {{{0,8}},28}, {{{0,9}},152}, |
{{{84,7}},83}, {{{0,8}},124}, {{{0,8}},60}, {{{0,9}},216}, |
{{{82,7}},23}, {{{0,8}},108}, {{{0,8}},44}, {{{0,9}},184}, |
{{{0,8}},12}, {{{0,8}},140}, {{{0,8}},76}, {{{0,9}},248}, |
{{{80,7}},3}, {{{0,8}},82}, {{{0,8}},18}, {{{85,8}},163}, |
{{{83,7}},35}, {{{0,8}},114}, {{{0,8}},50}, {{{0,9}},196}, |
{{{81,7}},11}, {{{0,8}},98}, {{{0,8}},34}, {{{0,9}},164}, |
{{{0,8}},2}, {{{0,8}},130}, {{{0,8}},66}, {{{0,9}},228}, |
{{{80,7}},7}, {{{0,8}},90}, {{{0,8}},26}, {{{0,9}},148}, |
{{{84,7}},67}, {{{0,8}},122}, {{{0,8}},58}, {{{0,9}},212}, |
{{{82,7}},19}, {{{0,8}},106}, {{{0,8}},42}, {{{0,9}},180}, |
{{{0,8}},10}, {{{0,8}},138}, {{{0,8}},74}, {{{0,9}},244}, |
{{{80,7}},5}, {{{0,8}},86}, {{{0,8}},22}, {{{192,8}},0}, |
{{{83,7}},51}, {{{0,8}},118}, {{{0,8}},54}, {{{0,9}},204}, |
{{{81,7}},15}, {{{0,8}},102}, {{{0,8}},38}, {{{0,9}},172}, |
{{{0,8}},6}, {{{0,8}},134}, {{{0,8}},70}, {{{0,9}},236}, |
{{{80,7}},9}, {{{0,8}},94}, {{{0,8}},30}, {{{0,9}},156}, |
{{{84,7}},99}, {{{0,8}},126}, {{{0,8}},62}, {{{0,9}},220}, |
{{{82,7}},27}, {{{0,8}},110}, {{{0,8}},46}, {{{0,9}},188}, |
{{{0,8}},14}, {{{0,8}},142}, {{{0,8}},78}, {{{0,9}},252}, |
{{{96,7}},256}, {{{0,8}},81}, {{{0,8}},17}, {{{85,8}},131}, |
{{{82,7}},31}, {{{0,8}},113}, {{{0,8}},49}, {{{0,9}},194}, |
{{{80,7}},10}, {{{0,8}},97}, {{{0,8}},33}, {{{0,9}},162}, |
{{{0,8}},1}, {{{0,8}},129}, {{{0,8}},65}, {{{0,9}},226}, |
{{{80,7}},6}, {{{0,8}},89}, {{{0,8}},25}, {{{0,9}},146}, |
{{{83,7}},59}, {{{0,8}},121}, {{{0,8}},57}, {{{0,9}},210}, |
{{{81,7}},17}, {{{0,8}},105}, {{{0,8}},41}, {{{0,9}},178}, |
{{{0,8}},9}, {{{0,8}},137}, {{{0,8}},73}, {{{0,9}},242}, |
{{{80,7}},4}, {{{0,8}},85}, {{{0,8}},21}, {{{80,8}},258}, |
{{{83,7}},43}, {{{0,8}},117}, {{{0,8}},53}, {{{0,9}},202}, |
{{{81,7}},13}, {{{0,8}},101}, {{{0,8}},37}, {{{0,9}},170}, |
{{{0,8}},5}, {{{0,8}},133}, {{{0,8}},69}, {{{0,9}},234}, |
{{{80,7}},8}, {{{0,8}},93}, {{{0,8}},29}, {{{0,9}},154}, |
{{{84,7}},83}, {{{0,8}},125}, {{{0,8}},61}, {{{0,9}},218}, |
{{{82,7}},23}, {{{0,8}},109}, {{{0,8}},45}, {{{0,9}},186}, |
{{{0,8}},13}, {{{0,8}},141}, {{{0,8}},77}, {{{0,9}},250}, |
{{{80,7}},3}, {{{0,8}},83}, {{{0,8}},19}, {{{85,8}},195}, |
{{{83,7}},35}, {{{0,8}},115}, {{{0,8}},51}, {{{0,9}},198}, |
{{{81,7}},11}, {{{0,8}},99}, {{{0,8}},35}, {{{0,9}},166}, |
{{{0,8}},3}, {{{0,8}},131}, {{{0,8}},67}, {{{0,9}},230}, |
{{{80,7}},7}, {{{0,8}},91}, {{{0,8}},27}, {{{0,9}},150}, |
{{{84,7}},67}, {{{0,8}},123}, {{{0,8}},59}, {{{0,9}},214}, |
{{{82,7}},19}, {{{0,8}},107}, {{{0,8}},43}, {{{0,9}},182}, |
{{{0,8}},11}, {{{0,8}},139}, {{{0,8}},75}, {{{0,9}},246}, |
{{{80,7}},5}, {{{0,8}},87}, {{{0,8}},23}, {{{192,8}},0}, |
{{{83,7}},51}, {{{0,8}},119}, {{{0,8}},55}, {{{0,9}},206}, |
{{{81,7}},15}, {{{0,8}},103}, {{{0,8}},39}, {{{0,9}},174}, |
{{{0,8}},7}, {{{0,8}},135}, {{{0,8}},71}, {{{0,9}},238}, |
{{{80,7}},9}, {{{0,8}},95}, {{{0,8}},31}, {{{0,9}},158}, |
{{{84,7}},99}, {{{0,8}},127}, {{{0,8}},63}, {{{0,9}},222}, |
{{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},190}, |
{{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},254}, |
{{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115}, |
{{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},193}, |
{{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},161}, |
{{{0,8}},0}, {{{0,8}},128}, {{{0,8}},64}, {{{0,9}},225}, |
{{{80,7}},6}, {{{0,8}},88}, {{{0,8}},24}, {{{0,9}},145}, |
{{{83,7}},59}, {{{0,8}},120}, {{{0,8}},56}, {{{0,9}},209}, |
{{{81,7}},17}, {{{0,8}},104}, {{{0,8}},40}, {{{0,9}},177}, |
{{{0,8}},8}, {{{0,8}},136}, {{{0,8}},72}, {{{0,9}},241}, |
{{{80,7}},4}, {{{0,8}},84}, {{{0,8}},20}, {{{85,8}},227}, |
{{{83,7}},43}, {{{0,8}},116}, {{{0,8}},52}, {{{0,9}},201}, |
{{{81,7}},13}, {{{0,8}},100}, {{{0,8}},36}, {{{0,9}},169}, |
{{{0,8}},4}, {{{0,8}},132}, {{{0,8}},68}, {{{0,9}},233}, |
{{{80,7}},8}, {{{0,8}},92}, {{{0,8}},28}, {{{0,9}},153}, |
{{{84,7}},83}, {{{0,8}},124}, {{{0,8}},60}, {{{0,9}},217}, |
{{{82,7}},23}, {{{0,8}},108}, {{{0,8}},44}, {{{0,9}},185}, |
{{{0,8}},12}, {{{0,8}},140}, {{{0,8}},76}, {{{0,9}},249}, |
{{{80,7}},3}, {{{0,8}},82}, {{{0,8}},18}, {{{85,8}},163}, |
{{{83,7}},35}, {{{0,8}},114}, {{{0,8}},50}, {{{0,9}},197}, |
{{{81,7}},11}, {{{0,8}},98}, {{{0,8}},34}, {{{0,9}},165}, |
{{{0,8}},2}, {{{0,8}},130}, {{{0,8}},66}, {{{0,9}},229}, |
{{{80,7}},7}, {{{0,8}},90}, {{{0,8}},26}, {{{0,9}},149}, |
{{{84,7}},67}, {{{0,8}},122}, {{{0,8}},58}, {{{0,9}},213}, |
{{{82,7}},19}, {{{0,8}},106}, {{{0,8}},42}, {{{0,9}},181}, |
{{{0,8}},10}, {{{0,8}},138}, {{{0,8}},74}, {{{0,9}},245}, |
{{{80,7}},5}, {{{0,8}},86}, {{{0,8}},22}, {{{192,8}},0}, |
{{{83,7}},51}, {{{0,8}},118}, {{{0,8}},54}, {{{0,9}},205}, |
{{{81,7}},15}, {{{0,8}},102}, {{{0,8}},38}, {{{0,9}},173}, |
{{{0,8}},6}, {{{0,8}},134}, {{{0,8}},70}, {{{0,9}},237}, |
{{{80,7}},9}, {{{0,8}},94}, {{{0,8}},30}, {{{0,9}},157}, |
{{{84,7}},99}, {{{0,8}},126}, {{{0,8}},62}, {{{0,9}},221}, |
{{{82,7}},27}, {{{0,8}},110}, {{{0,8}},46}, {{{0,9}},189}, |
{{{0,8}},14}, {{{0,8}},142}, {{{0,8}},78}, {{{0,9}},253}, |
{{{96,7}},256}, {{{0,8}},81}, {{{0,8}},17}, {{{85,8}},131}, |
{{{82,7}},31}, {{{0,8}},113}, {{{0,8}},49}, {{{0,9}},195}, |
{{{80,7}},10}, {{{0,8}},97}, {{{0,8}},33}, {{{0,9}},163}, |
{{{0,8}},1}, {{{0,8}},129}, {{{0,8}},65}, {{{0,9}},227}, |
{{{80,7}},6}, {{{0,8}},89}, {{{0,8}},25}, {{{0,9}},147}, |
{{{83,7}},59}, {{{0,8}},121}, {{{0,8}},57}, {{{0,9}},211}, |
{{{81,7}},17}, {{{0,8}},105}, {{{0,8}},41}, {{{0,9}},179}, |
{{{0,8}},9}, {{{0,8}},137}, {{{0,8}},73}, {{{0,9}},243}, |
{{{80,7}},4}, {{{0,8}},85}, {{{0,8}},21}, {{{80,8}},258}, |
{{{83,7}},43}, {{{0,8}},117}, {{{0,8}},53}, {{{0,9}},203}, |
{{{81,7}},13}, {{{0,8}},101}, {{{0,8}},37}, {{{0,9}},171}, |
{{{0,8}},5}, {{{0,8}},133}, {{{0,8}},69}, {{{0,9}},235}, |
{{{80,7}},8}, {{{0,8}},93}, {{{0,8}},29}, {{{0,9}},155}, |
{{{84,7}},83}, {{{0,8}},125}, {{{0,8}},61}, {{{0,9}},219}, |
{{{82,7}},23}, {{{0,8}},109}, {{{0,8}},45}, {{{0,9}},187}, |
{{{0,8}},13}, {{{0,8}},141}, {{{0,8}},77}, {{{0,9}},251}, |
{{{80,7}},3}, {{{0,8}},83}, {{{0,8}},19}, {{{85,8}},195}, |
{{{83,7}},35}, {{{0,8}},115}, {{{0,8}},51}, {{{0,9}},199}, |
{{{81,7}},11}, {{{0,8}},99}, {{{0,8}},35}, {{{0,9}},167}, |
{{{0,8}},3}, {{{0,8}},131}, {{{0,8}},67}, {{{0,9}},231}, |
{{{80,7}},7}, {{{0,8}},91}, {{{0,8}},27}, {{{0,9}},151}, |
{{{84,7}},67}, {{{0,8}},123}, {{{0,8}},59}, {{{0,9}},215}, |
{{{82,7}},19}, {{{0,8}},107}, {{{0,8}},43}, {{{0,9}},183}, |
{{{0,8}},11}, {{{0,8}},139}, {{{0,8}},75}, {{{0,9}},247}, |
{{{80,7}},5}, {{{0,8}},87}, {{{0,8}},23}, {{{192,8}},0}, |
{{{83,7}},51}, {{{0,8}},119}, {{{0,8}},55}, {{{0,9}},207}, |
{{{81,7}},15}, {{{0,8}},103}, {{{0,8}},39}, {{{0,9}},175}, |
{{{0,8}},7}, {{{0,8}},135}, {{{0,8}},71}, {{{0,9}},239}, |
{{{80,7}},9}, {{{0,8}},95}, {{{0,8}},31}, {{{0,9}},159}, |
{{{84,7}},99}, {{{0,8}},127}, {{{0,8}},63}, {{{0,9}},223}, |
{{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},191}, |
{{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},255} |
}; |
local inflate_huft fixed_td[] = { |
{{{80,5}},1}, {{{87,5}},257}, {{{83,5}},17}, {{{91,5}},4097}, |
{{{81,5}},5}, {{{89,5}},1025}, {{{85,5}},65}, {{{93,5}},16385}, |
{{{80,5}},3}, {{{88,5}},513}, {{{84,5}},33}, {{{92,5}},8193}, |
{{{82,5}},9}, {{{90,5}},2049}, {{{86,5}},129}, {{{192,5}},24577}, |
{{{80,5}},2}, {{{87,5}},385}, {{{83,5}},25}, {{{91,5}},6145}, |
{{{81,5}},7}, {{{89,5}},1537}, {{{85,5}},97}, {{{93,5}},24577}, |
{{{80,5}},4}, {{{88,5}},769}, {{{84,5}},49}, {{{92,5}},12289}, |
{{{82,5}},13}, {{{90,5}},3073}, {{{86,5}},193}, {{{192,5}},24577} |
}; |
/shark/trunk/ports/png/pngvcrd.c |
/* pngvcrd.c - mixed C/assembler version of utilities to read a PNG file |
* |
* For Intel x86 CPU and Microsoft Visual C++ compiler |
* |
* libpng version 1.2.5 - October 3, 2002 |
* For conditions of distribution and use, see copyright notice in png.h |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* Copyright (c) 1998, Intel Corporation |
* |
* Contributed by Nirav Chhatrapati, Intel Corporation, 1998 |
* Interface to libpng contributed by Gilles Vollant, 1999 |
* |
* |
* In png_do_read_interlace() in libpng versions 1.0.3a through 1.0.4d, |
* a sign error in the post-MMX cleanup code for each pixel_depth resulted |
* in bad pixels at the beginning of some rows of some images, and also |
* (due to out-of-range memory reads and writes) caused heap corruption |
* when compiled with MSVC 6.0. The error was fixed in version 1.0.4e. |
* |
* [png_read_filter_row_mmx_avg() bpp == 2 bugfix, GRR 20000916] |
* |
* [runtime MMX configuration, GRR 20010102] |
* |
*/ |
#define PNG_INTERNAL |
#include "png.h" |
#if defined(PNG_ASSEMBLER_CODE_SUPPORTED) && defined(PNG_USE_PNGVCRD) |
static int mmx_supported=2; |
int PNGAPI |
png_mmx_support(void) |
{ |
int mmx_supported_local = 0; |
_asm { |
push ebx //CPUID will trash these |
push ecx |
push edx |
pushfd //Save Eflag to stack |
pop eax //Get Eflag from stack into eax |
mov ecx, eax //Make another copy of Eflag in ecx |
xor eax, 0x200000 //Toggle ID bit in Eflag [i.e. bit(21)] |
push eax //Save modified Eflag back to stack |
popfd //Restore modified value back to Eflag reg
pushfd //Save Eflag to stack |
pop eax //Get Eflag from stack |
push ecx // save original Eflag to stack |
popfd // restore original Eflag |
xor eax, ecx //Compare the new Eflag with the original Eflag |
jz NOT_SUPPORTED //If the same, CPUID instruction is not supported, |
//skip following instructions and jump to |
//NOT_SUPPORTED label |
xor eax, eax //Set eax to zero |
_asm _emit 0x0f //CPUID instruction (two bytes opcode) |
_asm _emit 0xa2 |
cmp eax, 1 //make sure the highest CPUID leaf is at least 1
jl NOT_SUPPORTED //if it is zero, the feature-flag leaf is unavailable
xor eax, eax //set eax to zero |
inc eax //Now increment eax to 1. This instruction is |
//faster than the instruction "mov eax, 1" |
_asm _emit 0x0f //CPUID instruction |
_asm _emit 0xa2 |
and edx, 0x00800000 //mask out all bits but the MMX bit (EDX bit 23)
cmp edx, 0 // 0 = mmx not supported
jz NOT_SUPPORTED //if the MMX bit is clear, MMX is not supported
mov mmx_supported_local, 1 //set return value to 1 |
NOT_SUPPORTED: |
mov eax, mmx_supported_local //move return value to eax |
pop edx //CPUID trashed these |
pop ecx |
pop ebx |
} |
//mmx_supported_local=0; // test code: force "MMX not supported"
//printf("MMX : %u (1=MMX supported)\n",mmx_supported_local); |
mmx_supported = mmx_supported_local; |
return mmx_supported_local; |
} |
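/* Editorial sketch, not part of libpng: the probe above is MSVC inline
 * assembly.  With GCC's <cpuid.h> the same check (CPUID leaf 1, EDX bit 23)
 * could be written as below; this assumes a GCC-style build environment and
 * is shown for illustration only.
 */
#if 0
#include <cpuid.h>
static int
example_mmx_support(void)
{
   unsigned int eax, ebx, ecx, edx;
   if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
      return 0;                    /* CPUID leaf 1 unavailable */
   return (edx & bit_MMX) ? 1 : 0; /* MMX feature flag: EDX bit 23 */
}
#endif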
/* Combines the row recently read in with the previous row. |
This routine takes care of alpha and transparency if requested. |
This routine also handles the two methods of progressive display |
of interlaced images, depending on the mask value. |
The mask value describes which pixels are to be combined with |
the row. The pattern always repeats every 8 pixels, so just 8 |
bits are needed. A one indicates the pixel is to be combined; a |
zero indicates the pixel is to be skipped. This is in addition |
to any alpha or transparency value associated with the pixel. If |
you want all pixels to be combined, pass 0xff (255) in mask. */ |
/* Use this routine for x86 platform - uses faster MMX routine if machine |
supports MMX */ |
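/* Editorial example, not part of libpng: the mask repeats every 8 pixels with
   pixel 0 in the most significant bit.  During Adam7 pass 5 (0-based), for
   instance, the interlace mask table (png_pass_mask[] in png.c) supplies 0x55
   (binary 01010101), so within each group of 8 pixels the odd-numbered pixels
   (1, 3, 5, 7) are written and the even-numbered ones are left untouched;
   a mask of 0xff copies the whole row. */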
void /* PRIVATE */ |
png_combine_row(png_structp png_ptr, png_bytep row, int mask) |
{ |
#ifdef PNG_USE_LOCAL_ARRAYS |
const int png_pass_inc[7] = {8, 8, 4, 4, 2, 2, 1}; |
#endif |
png_debug(1,"in png_combine_row_asm\n"); |
if (mmx_supported == 2) { |
/* this should have happened in png_init_mmx_flags() already */ |
png_warning(png_ptr, "asm_flags may not have been initialized"); |
png_mmx_support(); |
} |
if (mask == 0xff) |
{ |
png_memcpy(row, png_ptr->row_buf + 1, |
(png_size_t)((png_ptr->width * png_ptr->row_info.pixel_depth + 7) >> 3)); |
} |
/* GRR: add "else if (mask == 0)" case? |
* or does png_combine_row() not even get called in that case? */ |
else |
{ |
switch (png_ptr->row_info.pixel_depth) |
{ |
case 1: |
{ |
png_bytep sp; |
png_bytep dp; |
int s_inc, s_start, s_end; |
int m; |
int shift; |
png_uint_32 i; |
sp = png_ptr->row_buf + 1; |
dp = row; |
m = 0x80; |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_PACKSWAP) |
{ |
s_start = 0; |
s_end = 7; |
s_inc = 1; |
} |
else |
#endif |
{ |
s_start = 7; |
s_end = 0; |
s_inc = -1; |
} |
shift = s_start; |
for (i = 0; i < png_ptr->width; i++) |
{ |
if (m & mask) |
{ |
int value; |
value = (*sp >> shift) & 0x1; |
*dp &= (png_byte)((0x7f7f >> (7 - shift)) & 0xff); |
*dp |= (png_byte)(value << shift); |
} |
if (shift == s_end) |
{ |
shift = s_start; |
sp++; |
dp++; |
} |
else |
shift += s_inc; |
if (m == 1) |
m = 0x80; |
else |
m >>= 1; |
} |
break; |
} |
case 2: |
{ |
png_bytep sp; |
png_bytep dp; |
int s_start, s_end, s_inc; |
int m; |
int shift; |
png_uint_32 i; |
int value; |
sp = png_ptr->row_buf + 1; |
dp = row; |
m = 0x80; |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_PACKSWAP) |
{ |
s_start = 0; |
s_end = 6; |
s_inc = 2; |
} |
else |
#endif |
{ |
s_start = 6; |
s_end = 0; |
s_inc = -2; |
} |
shift = s_start; |
for (i = 0; i < png_ptr->width; i++) |
{ |
if (m & mask) |
{ |
value = (*sp >> shift) & 0x3; |
*dp &= (png_byte)((0x3f3f >> (6 - shift)) & 0xff); |
*dp |= (png_byte)(value << shift); |
} |
if (shift == s_end) |
{ |
shift = s_start; |
sp++; |
dp++; |
} |
else |
shift += s_inc; |
if (m == 1) |
m = 0x80; |
else |
m >>= 1; |
} |
break; |
} |
case 4: |
{ |
png_bytep sp; |
png_bytep dp; |
int s_start, s_end, s_inc; |
int m; |
int shift; |
png_uint_32 i; |
int value; |
sp = png_ptr->row_buf + 1; |
dp = row; |
m = 0x80; |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_PACKSWAP) |
{ |
s_start = 0; |
s_end = 4; |
s_inc = 4; |
} |
else |
#endif |
{ |
s_start = 4; |
s_end = 0; |
s_inc = -4; |
} |
shift = s_start; |
for (i = 0; i < png_ptr->width; i++) |
{ |
if (m & mask) |
{ |
value = (*sp >> shift) & 0xf; |
*dp &= (png_byte)((0xf0f >> (4 - shift)) & 0xff); |
*dp |= (png_byte)(value << shift); |
} |
if (shift == s_end) |
{ |
shift = s_start; |
sp++; |
dp++; |
} |
else |
shift += s_inc; |
if (m == 1) |
m = 0x80; |
else |
m >>= 1; |
} |
break; |
} |
case 8: |
{ |
png_bytep srcptr; |
png_bytep dstptr; |
png_uint_32 len; |
int m; |
int diff, unmask; |
__int64 mask0=0x0102040810204080; |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW) |
/* && mmx_supported */ ) |
{ |
srcptr = png_ptr->row_buf + 1; |
dstptr = row; |
m = 0x80; |
unmask = ~mask; |
len = png_ptr->width &~7; //reduce to multiple of 8 |
diff = png_ptr->width & 7; //amount lost |
_asm |
{ |
movd mm7, unmask //load bit pattern |
psubb mm6,mm6 //zero mm6 |
punpcklbw mm7,mm7 |
punpcklwd mm7,mm7 |
punpckldq mm7,mm7 //fill register with 8 masks |
movq mm0,mask0 |
pand mm0,mm7 //nonzero if keep byte |
pcmpeqb mm0,mm6 //zeros->1s, v versa |
mov ecx,len //load length of line (pixels) |
mov esi,srcptr //load source |
mov ebx,dstptr //load dest |
cmp ecx,0 //lcr |
je mainloop8end |
mainloop8: |
movq mm4,[esi] |
pand mm4,mm0 |
movq mm6,mm0 |
pandn mm6,[ebx] |
por mm4,mm6 |
movq [ebx],mm4 |
add esi,8 //inc by 8 bytes processed |
add ebx,8 |
sub ecx,8 //dec by 8 pixels processed |
ja mainloop8 |
mainloop8end: |
mov ecx,diff |
cmp ecx,0 |
jz end8 |
mov edx,mask |
sal edx,24 //make low byte the high byte |
secondloop8: |
sal edx,1 //move high bit to CF |
jnc skip8 //if CF = 0 |
mov al,[esi] |
mov [ebx],al |
skip8: |
inc esi |
inc ebx |
dec ecx |
jnz secondloop8 |
end8: |
emms |
} |
} |
else /* mmx not supported - use modified C routine */ |
{ |
register unsigned int incr1, initial_val, final_val; |
png_size_t pixel_bytes; |
png_uint_32 i; |
register int disp = png_pass_inc[png_ptr->pass]; |
int offset_table[7] = {0, 4, 0, 2, 0, 1, 0}; |
pixel_bytes = (png_ptr->row_info.pixel_depth >> 3); |
srcptr = png_ptr->row_buf + 1 + offset_table[png_ptr->pass]* |
pixel_bytes; |
dstptr = row + offset_table[png_ptr->pass]*pixel_bytes; |
initial_val = offset_table[png_ptr->pass]*pixel_bytes; |
final_val = png_ptr->width*pixel_bytes; |
incr1 = (disp)*pixel_bytes; |
for (i = initial_val; i < final_val; i += incr1) |
{ |
png_memcpy(dstptr, srcptr, pixel_bytes); |
srcptr += incr1; |
dstptr += incr1; |
} |
} /* end of else */ |
break; |
} // end 8 bpp |
case 16: |
{ |
png_bytep srcptr; |
png_bytep dstptr; |
png_uint_32 len; |
int unmask, diff; |
__int64 mask1=0x0101020204040808, |
mask0=0x1010202040408080; |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW) |
/* && mmx_supported */ ) |
{ |
srcptr = png_ptr->row_buf + 1; |
dstptr = row; |
unmask = ~mask; |
len = (png_ptr->width)&~7; |
diff = (png_ptr->width)&7; |
_asm |
{ |
movd mm7, unmask //load bit pattern |
psubb mm6,mm6 //zero mm6 |
punpcklbw mm7,mm7 |
punpcklwd mm7,mm7 |
punpckldq mm7,mm7 //fill register with 8 masks |
movq mm0,mask0 |
movq mm1,mask1 |
pand mm0,mm7 |
pand mm1,mm7 |
pcmpeqb mm0,mm6 |
pcmpeqb mm1,mm6 |
mov ecx,len //load length of line |
mov esi,srcptr //load source |
mov ebx,dstptr //load dest |
cmp ecx,0 //lcr |
jz mainloop16end |
mainloop16: |
movq mm4,[esi] |
pand mm4,mm0 |
movq mm6,mm0 |
movq mm7,[ebx] |
pandn mm6,mm7 |
por mm4,mm6 |
movq [ebx],mm4 |
movq mm5,[esi+8] |
pand mm5,mm1 |
movq mm7,mm1 |
movq mm6,[ebx+8] |
pandn mm7,mm6 |
por mm5,mm7 |
movq [ebx+8],mm5 |
add esi,16 //inc by 16 bytes processed |
add ebx,16 |
sub ecx,8 //dec by 8 pixels processed |
ja mainloop16 |
mainloop16end: |
mov ecx,diff |
cmp ecx,0 |
jz end16 |
mov edx,mask |
sal edx,24 //make low byte the high byte |
secondloop16: |
sal edx,1 //move high bit to CF |
jnc skip16 //if CF = 0 |
mov ax,[esi] |
mov [ebx],ax |
skip16: |
add esi,2 |
add ebx,2 |
dec ecx |
jnz secondloop16 |
end16: |
emms |
} |
} |
else /* mmx not supported - use modified C routine */ |
{ |
register unsigned int incr1, initial_val, final_val; |
png_size_t pixel_bytes; |
png_uint_32 i; |
register int disp = png_pass_inc[png_ptr->pass]; |
int offset_table[7] = {0, 4, 0, 2, 0, 1, 0}; |
pixel_bytes = (png_ptr->row_info.pixel_depth >> 3); |
srcptr = png_ptr->row_buf + 1 + offset_table[png_ptr->pass]* |
pixel_bytes; |
dstptr = row + offset_table[png_ptr->pass]*pixel_bytes; |
initial_val = offset_table[png_ptr->pass]*pixel_bytes; |
final_val = png_ptr->width*pixel_bytes; |
incr1 = (disp)*pixel_bytes; |
for (i = initial_val; i < final_val; i += incr1) |
{ |
png_memcpy(dstptr, srcptr, pixel_bytes); |
srcptr += incr1; |
dstptr += incr1; |
} |
} /* end of else */ |
break; |
} // end 16 bpp |
case 24: |
{ |
png_bytep srcptr; |
png_bytep dstptr; |
png_uint_32 len; |
int unmask, diff; |
__int64 mask2=0x0101010202020404, //24bpp |
mask1=0x0408080810101020, |
mask0=0x2020404040808080; |
srcptr = png_ptr->row_buf + 1; |
dstptr = row; |
unmask = ~mask; |
len = (png_ptr->width)&~7; |
diff = (png_ptr->width)&7; |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW) |
/* && mmx_supported */ ) |
{ |
_asm |
{ |
movd mm7, unmask //load bit pattern |
psubb mm6,mm6 //zero mm6 |
punpcklbw mm7,mm7 |
punpcklwd mm7,mm7 |
punpckldq mm7,mm7 //fill register with 8 masks |
movq mm0,mask0 |
movq mm1,mask1 |
movq mm2,mask2 |
pand mm0,mm7 |
pand mm1,mm7 |
pand mm2,mm7 |
pcmpeqb mm0,mm6 |
pcmpeqb mm1,mm6 |
pcmpeqb mm2,mm6 |
mov ecx,len //load length of line |
mov esi,srcptr //load source |
mov ebx,dstptr //load dest |
cmp ecx,0 |
jz mainloop24end |
mainloop24: |
movq mm4,[esi] |
pand mm4,mm0 |
movq mm6,mm0 |
movq mm7,[ebx] |
pandn mm6,mm7 |
por mm4,mm6 |
movq [ebx],mm4 |
movq mm5,[esi+8] |
pand mm5,mm1 |
movq mm7,mm1 |
movq mm6,[ebx+8] |
pandn mm7,mm6 |
por mm5,mm7 |
movq [ebx+8],mm5 |
movq mm6,[esi+16] |
pand mm6,mm2 |
movq mm4,mm2 |
movq mm7,[ebx+16] |
pandn mm4,mm7 |
por mm6,mm4 |
movq [ebx+16],mm6 |
add esi,24 //inc by 24 bytes processed |
add ebx,24 |
sub ecx,8 //dec by 8 pixels processed |
ja mainloop24 |
mainloop24end: |
mov ecx,diff |
cmp ecx,0 |
jz end24 |
mov edx,mask |
sal edx,24 //make low byte the high byte |
secondloop24: |
sal edx,1 //move high bit to CF |
jnc skip24 //if CF = 0 |
mov ax,[esi] |
mov [ebx],ax |
xor eax,eax |
mov al,[esi+2] |
mov [ebx+2],al |
skip24: |
add esi,3 |
add ebx,3 |
dec ecx |
jnz secondloop24 |
end24: |
emms |
} |
} |
else /* mmx not supported - use modified C routine */ |
{ |
register unsigned int incr1, initial_val, final_val; |
png_size_t pixel_bytes; |
png_uint_32 i; |
register int disp = png_pass_inc[png_ptr->pass]; |
int offset_table[7] = {0, 4, 0, 2, 0, 1, 0}; |
pixel_bytes = (png_ptr->row_info.pixel_depth >> 3); |
srcptr = png_ptr->row_buf + 1 + offset_table[png_ptr->pass]* |
pixel_bytes; |
dstptr = row + offset_table[png_ptr->pass]*pixel_bytes; |
initial_val = offset_table[png_ptr->pass]*pixel_bytes; |
final_val = png_ptr->width*pixel_bytes; |
incr1 = (disp)*pixel_bytes; |
for (i = initial_val; i < final_val; i += incr1) |
{ |
png_memcpy(dstptr, srcptr, pixel_bytes); |
srcptr += incr1; |
dstptr += incr1; |
} |
} /* end of else */ |
break; |
} // end 24 bpp |
case 32: |
{ |
png_bytep srcptr; |
png_bytep dstptr; |
png_uint_32 len; |
int unmask, diff; |
__int64 mask3=0x0101010102020202, //32bpp |
mask2=0x0404040408080808, |
mask1=0x1010101020202020, |
mask0=0x4040404080808080; |
srcptr = png_ptr->row_buf + 1; |
dstptr = row; |
unmask = ~mask; |
len = (png_ptr->width)&~7; |
diff = (png_ptr->width)&7; |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW) |
/* && mmx_supported */ ) |
{ |
_asm |
{ |
movd mm7, unmask //load bit pattern |
psubb mm6,mm6 //zero mm6 |
punpcklbw mm7,mm7 |
punpcklwd mm7,mm7 |
punpckldq mm7,mm7 //fill register with 8 masks |
movq mm0,mask0 |
movq mm1,mask1 |
movq mm2,mask2 |
movq mm3,mask3 |
pand mm0,mm7 |
pand mm1,mm7 |
pand mm2,mm7 |
pand mm3,mm7 |
pcmpeqb mm0,mm6 |
pcmpeqb mm1,mm6 |
pcmpeqb mm2,mm6 |
pcmpeqb mm3,mm6 |
mov ecx,len //load length of line |
mov esi,srcptr //load source |
mov ebx,dstptr //load dest |
cmp ecx,0 //lcr |
jz mainloop32end |
mainloop32: |
movq mm4,[esi] |
pand mm4,mm0 |
movq mm6,mm0 |
movq mm7,[ebx] |
pandn mm6,mm7 |
por mm4,mm6 |
movq [ebx],mm4 |
movq mm5,[esi+8] |
pand mm5,mm1 |
movq mm7,mm1 |
movq mm6,[ebx+8] |
pandn mm7,mm6 |
por mm5,mm7 |
movq [ebx+8],mm5 |
movq mm6,[esi+16] |
pand mm6,mm2 |
movq mm4,mm2 |
movq mm7,[ebx+16] |
pandn mm4,mm7 |
por mm6,mm4 |
movq [ebx+16],mm6 |
movq mm7,[esi+24] |
pand mm7,mm3 |
movq mm5,mm3 |
movq mm4,[ebx+24] |
pandn mm5,mm4 |
por mm7,mm5 |
movq [ebx+24],mm7 |
add esi,32 //inc by 32 bytes processed |
add ebx,32 |
sub ecx,8 //dec by 8 pixels processed |
ja mainloop32 |
mainloop32end: |
mov ecx,diff |
cmp ecx,0 |
jz end32 |
mov edx,mask |
sal edx,24 //make low byte the high byte |
secondloop32: |
sal edx,1 //move high bit to CF |
jnc skip32 //if CF = 0 |
mov eax,[esi] |
mov [ebx],eax |
skip32: |
add esi,4 |
add ebx,4 |
dec ecx |
jnz secondloop32 |
end32: |
emms |
} |
} |
else /* mmx not supported - use modified C routine */
{ |
register unsigned int incr1, initial_val, final_val; |
png_size_t pixel_bytes; |
png_uint_32 i; |
register int disp = png_pass_inc[png_ptr->pass]; |
int offset_table[7] = {0, 4, 0, 2, 0, 1, 0}; |
pixel_bytes = (png_ptr->row_info.pixel_depth >> 3); |
srcptr = png_ptr->row_buf + 1 + offset_table[png_ptr->pass]* |
pixel_bytes; |
dstptr = row + offset_table[png_ptr->pass]*pixel_bytes; |
initial_val = offset_table[png_ptr->pass]*pixel_bytes; |
final_val = png_ptr->width*pixel_bytes; |
incr1 = (disp)*pixel_bytes; |
for (i = initial_val; i < final_val; i += incr1) |
{ |
png_memcpy(dstptr, srcptr, pixel_bytes); |
srcptr += incr1; |
dstptr += incr1; |
} |
} /* end of else */ |
break; |
} // end 32 bpp |
case 48: |
{ |
png_bytep srcptr; |
png_bytep dstptr; |
png_uint_32 len; |
int unmask, diff; |
__int64 mask5=0x0101010101010202, |
mask4=0x0202020204040404, |
mask3=0x0404080808080808, |
mask2=0x1010101010102020, |
mask1=0x2020202040404040, |
mask0=0x4040808080808080; |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_COMBINE_ROW) |
/* && mmx_supported */ ) |
{ |
srcptr = png_ptr->row_buf + 1; |
dstptr = row; |
unmask = ~mask; |
len = (png_ptr->width)&~7; |
diff = (png_ptr->width)&7; |
_asm |
{ |
movd mm7, unmask //load bit pattern |
psubb mm6,mm6 //zero mm6 |
punpcklbw mm7,mm7 |
punpcklwd mm7,mm7 |
punpckldq mm7,mm7 //fill register with 8 masks |
movq mm0,mask0 |
movq mm1,mask1 |
movq mm2,mask2 |
movq mm3,mask3 |
movq mm4,mask4 |
movq mm5,mask5 |
pand mm0,mm7 |
pand mm1,mm7 |
pand mm2,mm7 |
pand mm3,mm7 |
pand mm4,mm7 |
pand mm5,mm7 |
pcmpeqb mm0,mm6 |
pcmpeqb mm1,mm6 |
pcmpeqb mm2,mm6 |
pcmpeqb mm3,mm6 |
pcmpeqb mm4,mm6 |
pcmpeqb mm5,mm6 |
mov ecx,len //load length of line |
mov esi,srcptr //load source |
mov ebx,dstptr //load dest |
cmp ecx,0 |
jz mainloop48end |
mainloop48: |
movq mm7,[esi] |
pand mm7,mm0 |
movq mm6,mm0 |
pandn mm6,[ebx] |
por mm7,mm6 |
movq [ebx],mm7 |
movq mm6,[esi+8] |
pand mm6,mm1 |
movq mm7,mm1 |
pandn mm7,[ebx+8] |
por mm6,mm7 |
movq [ebx+8],mm6 |
movq mm6,[esi+16] |
pand mm6,mm2 |
movq mm7,mm2 |
pandn mm7,[ebx+16] |
por mm6,mm7 |
movq [ebx+16],mm6 |
movq mm7,[esi+24] |
pand mm7,mm3 |
movq mm6,mm3 |
pandn mm6,[ebx+24] |
por mm7,mm6 |
movq [ebx+24],mm7 |
movq mm6,[esi+32] |
pand mm6,mm4 |
movq mm7,mm4 |
pandn mm7,[ebx+32] |
por mm6,mm7 |
movq [ebx+32],mm6 |
movq mm7,[esi+40] |
pand mm7,mm5 |
movq mm6,mm5 |
pandn mm6,[ebx+40] |
por mm7,mm6 |
movq [ebx+40],mm7 |
add esi,48 //inc by 48 bytes processed
add ebx,48 |
sub ecx,8 //dec by 8 pixels processed |
ja mainloop48 |
mainloop48end: |
mov ecx,diff |
cmp ecx,0 |
jz end48 |
mov edx,mask |
sal edx,24 //make low byte the high byte |
secondloop48: |
sal edx,1 //move high bit to CF |
jnc skip48 //if CF = 0 |
mov eax,[esi] |
mov [ebx],eax |
skip48: |
add esi,4 |
add ebx,4 |
dec ecx |
jnz secondloop48 |
end48: |
emms |
} |
} |
else /* mmx not supported - use modified C routine */
{ |
register unsigned int incr1, initial_val, final_val; |
png_size_t pixel_bytes; |
png_uint_32 i; |
register int disp = png_pass_inc[png_ptr->pass]; |
int offset_table[7] = {0, 4, 0, 2, 0, 1, 0}; |
pixel_bytes = (png_ptr->row_info.pixel_depth >> 3); |
srcptr = png_ptr->row_buf + 1 + offset_table[png_ptr->pass]* |
pixel_bytes; |
dstptr = row + offset_table[png_ptr->pass]*pixel_bytes; |
initial_val = offset_table[png_ptr->pass]*pixel_bytes; |
final_val = png_ptr->width*pixel_bytes; |
incr1 = (disp)*pixel_bytes; |
for (i = initial_val; i < final_val; i += incr1) |
{ |
png_memcpy(dstptr, srcptr, pixel_bytes); |
srcptr += incr1; |
dstptr += incr1; |
} |
} /* end of else */ |
break; |
} // end 48 bpp |
default: |
{ |
png_bytep sptr; |
png_bytep dp; |
png_size_t pixel_bytes; |
int offset_table[7] = {0, 4, 0, 2, 0, 1, 0}; |
unsigned int i; |
register int disp = png_pass_inc[png_ptr->pass]; // get the offset |
register unsigned int incr1, initial_val, final_val; |
pixel_bytes = (png_ptr->row_info.pixel_depth >> 3); |
sptr = png_ptr->row_buf + 1 + offset_table[png_ptr->pass]* |
pixel_bytes; |
dp = row + offset_table[png_ptr->pass]*pixel_bytes; |
initial_val = offset_table[png_ptr->pass]*pixel_bytes; |
final_val = png_ptr->width*pixel_bytes; |
incr1 = (disp)*pixel_bytes; |
for (i = initial_val; i < final_val; i += incr1) |
{ |
png_memcpy(dp, sptr, pixel_bytes); |
sptr += incr1; |
dp += incr1; |
} |
break; |
} |
} /* end switch (png_ptr->row_info.pixel_depth) */ |
} /* end if (non-trivial mask) */ |
} /* end png_combine_row() */ |
#if defined(PNG_READ_INTERLACING_SUPPORTED) |
void /* PRIVATE */ |
png_do_read_interlace(png_structp png_ptr) |
{ |
png_row_infop row_info = &(png_ptr->row_info); |
png_bytep row = png_ptr->row_buf + 1; |
int pass = png_ptr->pass; |
png_uint_32 transformations = png_ptr->transformations; |
#ifdef PNG_USE_LOCAL_ARRAYS |
const int png_pass_inc[7] = {8, 8, 4, 4, 2, 2, 1}; |
#endif |
png_debug(1,"in png_do_read_interlace\n"); |
if (mmx_supported == 2) { |
/* this should have happened in png_init_mmx_flags() already */ |
png_warning(png_ptr, "asm_flags may not have been initialized"); |
png_mmx_support(); |
} |
if (row != NULL && row_info != NULL) |
{ |
png_uint_32 final_width; |
final_width = row_info->width * png_pass_inc[pass]; |
switch (row_info->pixel_depth) |
{ |
case 1: |
{ |
png_bytep sp, dp; |
int sshift, dshift; |
int s_start, s_end, s_inc; |
png_byte v; |
png_uint_32 i; |
int j; |
sp = row + (png_size_t)((row_info->width - 1) >> 3); |
dp = row + (png_size_t)((final_width - 1) >> 3); |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (transformations & PNG_PACKSWAP) |
{ |
sshift = (int)((row_info->width + 7) & 7); |
dshift = (int)((final_width + 7) & 7); |
s_start = 7; |
s_end = 0; |
s_inc = -1; |
} |
else |
#endif |
{ |
sshift = 7 - (int)((row_info->width + 7) & 7); |
dshift = 7 - (int)((final_width + 7) & 7); |
s_start = 0; |
s_end = 7; |
s_inc = 1; |
} |
for (i = row_info->width; i; i--) |
{ |
v = (png_byte)((*sp >> sshift) & 0x1); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
*dp &= (png_byte)((0x7f7f >> (7 - dshift)) & 0xff); |
*dp |= (png_byte)(v << dshift); |
if (dshift == s_end) |
{ |
dshift = s_start; |
dp--; |
} |
else |
dshift += s_inc; |
} |
if (sshift == s_end) |
{ |
sshift = s_start; |
sp--; |
} |
else |
sshift += s_inc; |
} |
break; |
} |
case 2: |
{ |
png_bytep sp, dp; |
int sshift, dshift; |
int s_start, s_end, s_inc; |
png_uint_32 i; |
sp = row + (png_size_t)((row_info->width - 1) >> 2); |
dp = row + (png_size_t)((final_width - 1) >> 2); |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (transformations & PNG_PACKSWAP) |
{ |
sshift = (png_size_t)(((row_info->width + 3) & 3) << 1); |
dshift = (png_size_t)(((final_width + 3) & 3) << 1); |
s_start = 6; |
s_end = 0; |
s_inc = -2; |
} |
else |
#endif |
{ |
sshift = (png_size_t)((3 - ((row_info->width + 3) & 3)) << 1); |
dshift = (png_size_t)((3 - ((final_width + 3) & 3)) << 1); |
s_start = 0; |
s_end = 6; |
s_inc = 2; |
} |
for (i = row_info->width; i; i--) |
{ |
png_byte v; |
int j; |
v = (png_byte)((*sp >> sshift) & 0x3); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
*dp &= (png_byte)((0x3f3f >> (6 - dshift)) & 0xff); |
*dp |= (png_byte)(v << dshift); |
if (dshift == s_end) |
{ |
dshift = s_start; |
dp--; |
} |
else |
dshift += s_inc; |
} |
if (sshift == s_end) |
{ |
sshift = s_start; |
sp--; |
} |
else |
sshift += s_inc; |
} |
break; |
} |
case 4: |
{ |
png_bytep sp, dp; |
int sshift, dshift; |
int s_start, s_end, s_inc; |
png_uint_32 i; |
sp = row + (png_size_t)((row_info->width - 1) >> 1); |
dp = row + (png_size_t)((final_width - 1) >> 1); |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (transformations & PNG_PACKSWAP) |
{ |
sshift = (png_size_t)(((row_info->width + 1) & 1) << 2); |
dshift = (png_size_t)(((final_width + 1) & 1) << 2); |
s_start = 4; |
s_end = 0; |
s_inc = -4; |
} |
else |
#endif |
{ |
sshift = (png_size_t)((1 - ((row_info->width + 1) & 1)) << 2); |
dshift = (png_size_t)((1 - ((final_width + 1) & 1)) << 2); |
s_start = 0; |
s_end = 4; |
s_inc = 4; |
} |
for (i = row_info->width; i; i--) |
{ |
png_byte v; |
int j; |
v = (png_byte)((*sp >> sshift) & 0xf); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
*dp &= (png_byte)((0xf0f >> (4 - dshift)) & 0xff); |
*dp |= (png_byte)(v << dshift); |
if (dshift == s_end) |
{ |
dshift = s_start; |
dp--; |
} |
else |
dshift += s_inc; |
} |
if (sshift == s_end) |
{ |
sshift = s_start; |
sp--; |
} |
else |
sshift += s_inc; |
} |
break; |
} |
default: // pixel_depth >= 8: this is where the MMX version differs from the generic C routine
{ |
__int64 const4 = 0x0000000000FFFFFF; |
// __int64 const5 = 0x000000FFFFFF0000; // unused... |
__int64 const6 = 0x00000000000000FF; |
png_bytep sptr, dp; |
png_uint_32 i; |
png_size_t pixel_bytes; |
int width = row_info->width; |
pixel_bytes = (row_info->pixel_depth >> 3); |
sptr = row + (width - 1) * pixel_bytes; |
dp = row + (final_width - 1) * pixel_bytes; |
// New code by Nirav Chhatrapati - Intel Corporation |
// sign fix by GRR |
// NOTE: there is NO MMX code for 48-bit and 64-bit images |
// use MMX routine if machine supports it |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_INTERLACE) |
/* && mmx_supported */ ) |
{ |
if (pixel_bytes == 3) |
{ |
if (((pass == 0) || (pass == 1)) && width) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width |
sub edi, 21 // (png_pass_inc[pass] - 1)*pixel_bytes |
loop_pass0: |
movd mm0, [esi] ; X X X X X v2 v1 v0 |
pand mm0, const4 ; 0 0 0 0 0 v2 v1 v0 |
movq mm1, mm0 ; 0 0 0 0 0 v2 v1 v0 |
psllq mm0, 16 ; 0 0 0 v2 v1 v0 0 0 |
movq mm2, mm0 ; 0 0 0 v2 v1 v0 0 0 |
psllq mm0, 24 ; v2 v1 v0 0 0 0 0 0 |
psrlq mm1, 8 ; 0 0 0 0 0 0 v2 v1 |
por mm0, mm2 ; v2 v1 v0 v2 v1 v0 0 0 |
por mm0, mm1 ; v2 v1 v0 v2 v1 v0 v2 v1 |
movq mm3, mm0 ; v2 v1 v0 v2 v1 v0 v2 v1 |
psllq mm0, 16 ; v0 v2 v1 v0 v2 v1 0 0 |
movq mm4, mm3 ; v2 v1 v0 v2 v1 v0 v2 v1 |
punpckhdq mm3, mm0 ; v0 v2 v1 v0 v2 v1 v0 v2 |
movq [edi+16] , mm4 |
psrlq mm0, 32 ; 0 0 0 0 v0 v2 v1 v0 |
movq [edi+8] , mm3 |
punpckldq mm0, mm4 ; v1 v0 v2 v1 v0 v2 v1 v0 |
sub esi, 3 |
movq [edi], mm0 |
sub edi, 24 |
//sub esi, 3 |
dec ecx |
jnz loop_pass0 |
EMMS |
} |
} |
else if (((pass == 2) || (pass == 3)) && width) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width |
sub edi, 9 // (png_pass_inc[pass] - 1)*pixel_bytes |
loop_pass2: |
movd mm0, [esi] ; X X X X X v2 v1 v0 |
pand mm0, const4 ; 0 0 0 0 0 v2 v1 v0 |
movq mm1, mm0 ; 0 0 0 0 0 v2 v1 v0 |
psllq mm0, 16 ; 0 0 0 v2 v1 v0 0 0 |
movq mm2, mm0 ; 0 0 0 v2 v1 v0 0 0 |
psllq mm0, 24 ; v2 v1 v0 0 0 0 0 0 |
psrlq mm1, 8 ; 0 0 0 0 0 0 v2 v1 |
por mm0, mm2 ; v2 v1 v0 v2 v1 v0 0 0 |
por mm0, mm1 ; v2 v1 v0 v2 v1 v0 v2 v1 |
movq [edi+4], mm0 ; move to memory |
psrlq mm0, 16 ; 0 0 v2 v1 v0 v2 v1 v0 |
movd [edi], mm0 ; move to memory |
sub esi, 3 |
sub edi, 12 |
dec ecx |
jnz loop_pass2 |
EMMS |
} |
} |
else if (width) /* && ((pass == 4) || (pass == 5)) */ |
{ |
int width_mmx = ((width >> 1) << 1) - 8; |
if (width_mmx < 0) |
width_mmx = 0; |
width -= width_mmx; // 8 or 9 pix, 24 or 27 bytes |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 3 |
sub edi, 9 |
loop_pass4: |
movq mm0, [esi] ; X X v2 v1 v0 v5 v4 v3 |
movq mm7, mm0 ; X X v2 v1 v0 v5 v4 v3 |
movq mm6, mm0 ; X X v2 v1 v0 v5 v4 v3 |
psllq mm0, 24 ; v1 v0 v5 v4 v3 0 0 0 |
pand mm7, const4 ; 0 0 0 0 0 v5 v4 v3 |
psrlq mm6, 24 ; 0 0 0 X X v2 v1 v0 |
por mm0, mm7 ; v1 v0 v5 v4 v3 v5 v4 v3 |
movq mm5, mm6 ; 0 0 0 X X v2 v1 v0 |
psllq mm6, 8 ; 0 0 X X v2 v1 v0 0 |
movq [edi], mm0 ; move quad to memory |
psrlq mm5, 16 ; 0 0 0 0 0 X X v2 |
pand mm5, const6 ; 0 0 0 0 0 0 0 v2 |
por mm6, mm5 ; 0 0 X X v2 v1 v0 v2 |
movd [edi+8], mm6 ; move double to memory |
sub esi, 6 |
sub edi, 12 |
sub ecx, 2 |
jnz loop_pass4 |
EMMS |
} |
} |
sptr -= width_mmx*3; |
dp -= width_mmx*6; |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, 3); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, 3); |
dp -= 3; |
} |
sptr -= 3; |
} |
} |
} /* end of pixel_bytes == 3 */ |
else if (pixel_bytes == 1) |
{ |
if (((pass == 0) || (pass == 1)) && width) |
{ |
int width_mmx = ((width >> 2) << 2); |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub edi, 31 |
sub esi, 3 |
loop1_pass0: |
movd mm0, [esi] ; X X X X v0 v1 v2 v3 |
movq mm1, mm0 ; X X X X v0 v1 v2 v3 |
punpcklbw mm0, mm0 ; v0 v0 v1 v1 v2 v2 v3 v3 |
movq mm2, mm0 ; v0 v0 v1 v1 v2 v2 v3 v3 |
punpcklwd mm0, mm0 ; v2 v2 v2 v2 v3 v3 v3 v3 |
movq mm3, mm0 ; v2 v2 v2 v2 v3 v3 v3 v3 |
punpckldq mm0, mm0 ; v3 v3 v3 v3 v3 v3 v3 v3 |
punpckhdq mm3, mm3 ; v2 v2 v2 v2 v2 v2 v2 v2 |
movq [edi], mm0 ; move to memory v3 |
punpckhwd mm2, mm2 ; v0 v0 v0 v0 v1 v1 v1 v1 |
movq [edi+8], mm3 ; move to memory v2 |
movq mm4, mm2 ; v0 v0 v0 v0 v1 v1 v1 v1 |
punpckldq mm2, mm2 ; v1 v1 v1 v1 v1 v1 v1 v1 |
punpckhdq mm4, mm4 ; v0 v0 v0 v0 v0 v0 v0 v0 |
movq [edi+16], mm2 ; move to memory v1 |
movq [edi+24], mm4 ; move to memory v0 |
sub esi, 4 |
sub edi, 32 |
sub ecx, 4 |
jnz loop1_pass0 |
EMMS |
} |
} |
sptr -= width_mmx; |
dp -= width_mmx*8; |
for (i = width; i; i--) |
{ |
int j; |
/* I simplified this part in version 1.0.4e |
* here and in several other instances where |
* pixel_bytes == 1 -- GR-P |
* |
* Original code: |
* |
* png_byte v[8]; |
* png_memcpy(v, sptr, pixel_bytes); |
* for (j = 0; j < png_pass_inc[pass]; j++) |
* { |
* png_memcpy(dp, v, pixel_bytes); |
* dp -= pixel_bytes; |
* } |
* sptr -= pixel_bytes; |
* |
* Replacement code is in the next three lines: |
*/ |
for (j = 0; j < png_pass_inc[pass]; j++) |
*dp-- = *sptr; |
sptr--; |
} |
} |
else if (((pass == 2) || (pass == 3)) && width) |
{ |
int width_mmx = ((width >> 2) << 2); |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub edi, 15 |
sub esi, 3 |
loop1_pass2: |
movd mm0, [esi] ; X X X X v0 v1 v2 v3 |
punpcklbw mm0, mm0 ; v0 v0 v1 v1 v2 v2 v3 v3 |
movq mm1, mm0 ; v0 v0 v1 v1 v2 v2 v3 v3 |
punpcklwd mm0, mm0 ; v2 v2 v2 v2 v3 v3 v3 v3 |
punpckhwd mm1, mm1 ; v0 v0 v0 v0 v1 v1 v1 v1 |
movq [edi], mm0 ; move to memory v2 and v3 |
sub esi, 4 |
movq [edi+8], mm1 ; move to memory v1 and v0 |
sub edi, 16 |
sub ecx, 4 |
jnz loop1_pass2 |
EMMS |
} |
} |
sptr -= width_mmx; |
dp -= width_mmx*4; |
for (i = width; i; i--) |
{ |
int j; |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
*dp-- = *sptr; |
} |
sptr --; |
} |
} |
else if (width) /* && ((pass == 4) || (pass == 5)) */ |
{ |
int width_mmx = ((width >> 3) << 3); |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub edi, 15 |
sub esi, 7 |
loop1_pass4: |
movq mm0, [esi] ; v0 v1 v2 v3 v4 v5 v6 v7 |
movq mm1, mm0 ; v0 v1 v2 v3 v4 v5 v6 v7 |
punpcklbw mm0, mm0 ; v4 v4 v5 v5 v6 v6 v7 v7 |
//movq mm1, mm0 ; v0 v0 v1 v1 v2 v2 v3 v3 |
punpckhbw mm1, mm1 ;v0 v0 v1 v1 v2 v2 v3 v3 |
movq [edi+8], mm1 ; move to memory v0 v1 v2 and v3 |
sub esi, 8 |
movq [edi], mm0 ; move to memory v4 v5 v6 and v7 |
//sub esi, 4 |
sub edi, 16 |
sub ecx, 8 |
jnz loop1_pass4 |
EMMS |
} |
} |
sptr -= width_mmx; |
dp -= width_mmx*2; |
for (i = width; i; i--) |
{ |
int j; |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
*dp-- = *sptr; |
} |
sptr --; |
} |
} |
} /* end of pixel_bytes == 1 */ |
else if (pixel_bytes == 2) |
{ |
if (((pass == 0) || (pass == 1)) && width) |
{ |
int width_mmx = ((width >> 1) << 1); |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 2 |
sub edi, 30 |
loop2_pass0: |
movd mm0, [esi] ; X X X X v1 v0 v3 v2 |
punpcklwd mm0, mm0 ; v1 v0 v1 v0 v3 v2 v3 v2 |
movq mm1, mm0 ; v1 v0 v1 v0 v3 v2 v3 v2 |
punpckldq mm0, mm0 ; v3 v2 v3 v2 v3 v2 v3 v2 |
punpckhdq mm1, mm1 ; v1 v0 v1 v0 v1 v0 v1 v0 |
movq [edi], mm0 |
movq [edi + 8], mm0 |
movq [edi + 16], mm1 |
movq [edi + 24], mm1 |
sub esi, 4 |
sub edi, 32 |
sub ecx, 2 |
jnz loop2_pass0 |
EMMS |
} |
} |
sptr -= (width_mmx*2 - 2); // sign fixed |
dp -= (width_mmx*16 - 2); // sign fixed |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
sptr -= 2; |
png_memcpy(v, sptr, 2); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
dp -= 2; |
png_memcpy(dp, v, 2); |
} |
} |
} |
else if (((pass == 2) || (pass == 3)) && width) |
{ |
int width_mmx = ((width >> 1) << 1) ; |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 2 |
sub edi, 14 |
loop2_pass2: |
movd mm0, [esi] ; X X X X v1 v0 v3 v2 |
punpcklwd mm0, mm0 ; v1 v0 v1 v0 v3 v2 v3 v2 |
movq mm1, mm0 ; v1 v0 v1 v0 v3 v2 v3 v2 |
punpckldq mm0, mm0 ; v3 v2 v3 v2 v3 v2 v3 v2 |
punpckhdq mm1, mm1 ; v1 v0 v1 v0 v1 v0 v1 v0 |
movq [edi], mm0 |
sub esi, 4 |
movq [edi + 8], mm1 |
//sub esi, 4 |
sub edi, 16 |
sub ecx, 2 |
jnz loop2_pass2 |
EMMS |
} |
} |
sptr -= (width_mmx*2 - 2); // sign fixed |
dp -= (width_mmx*8 - 2); // sign fixed |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
sptr -= 2; |
png_memcpy(v, sptr, 2); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
dp -= 2; |
png_memcpy(dp, v, 2); |
} |
} |
} |
else if (width) // pass == 4 or 5 |
{ |
int width_mmx = ((width >> 1) << 1) ; |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 2 |
sub edi, 6 |
loop2_pass4: |
movd mm0, [esi] ; X X X X v1 v0 v3 v2 |
punpcklwd mm0, mm0 ; v1 v0 v1 v0 v3 v2 v3 v2 |
sub esi, 4 |
movq [edi], mm0 |
sub edi, 8 |
sub ecx, 2 |
jnz loop2_pass4 |
EMMS |
} |
} |
sptr -= (width_mmx*2 - 2); // sign fixed |
dp -= (width_mmx*4 - 2); // sign fixed |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
sptr -= 2; |
png_memcpy(v, sptr, 2); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
dp -= 2; |
png_memcpy(dp, v, 2); |
} |
} |
} |
} /* end of pixel_bytes == 2 */ |
else if (pixel_bytes == 4) |
{ |
if (((pass == 0) || (pass == 1)) && width) |
{ |
int width_mmx = ((width >> 1) << 1) ; |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 4 |
sub edi, 60 |
loop4_pass0: |
movq mm0, [esi] ; v3 v2 v1 v0 v7 v6 v5 v4 |
movq mm1, mm0 ; v3 v2 v1 v0 v7 v6 v5 v4 |
punpckldq mm0, mm0 ; v7 v6 v5 v4 v7 v6 v5 v4 |
punpckhdq mm1, mm1 ; v3 v2 v1 v0 v3 v2 v1 v0 |
movq [edi], mm0 |
movq [edi + 8], mm0 |
movq [edi + 16], mm0 |
movq [edi + 24], mm0 |
movq [edi+32], mm1 |
movq [edi + 40], mm1 |
movq [edi+ 48], mm1 |
sub esi, 8 |
movq [edi + 56], mm1 |
sub edi, 64 |
sub ecx, 2 |
jnz loop4_pass0 |
EMMS |
} |
} |
sptr -= (width_mmx*4 - 4); // sign fixed |
dp -= (width_mmx*32 - 4); // sign fixed |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
sptr -= 4; |
png_memcpy(v, sptr, 4); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
dp -= 4; |
png_memcpy(dp, v, 4); |
} |
} |
} |
else if (((pass == 2) || (pass == 3)) && width) |
{ |
int width_mmx = ((width >> 1) << 1) ; |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 4 |
sub edi, 28 |
loop4_pass2: |
movq mm0, [esi] ; v3 v2 v1 v0 v7 v6 v5 v4 |
movq mm1, mm0 ; v3 v2 v1 v0 v7 v6 v5 v4 |
punpckldq mm0, mm0 ; v7 v6 v5 v4 v7 v6 v5 v4 |
punpckhdq mm1, mm1 ; v3 v2 v1 v0 v3 v2 v1 v0 |
movq [edi], mm0 |
movq [edi + 8], mm0 |
movq [edi+16], mm1 |
movq [edi + 24], mm1 |
sub esi, 8 |
sub edi, 32 |
sub ecx, 2 |
jnz loop4_pass2 |
EMMS |
} |
} |
sptr -= (width_mmx*4 - 4); // sign fixed |
dp -= (width_mmx*16 - 4); // sign fixed |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
sptr -= 4; |
png_memcpy(v, sptr, 4); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
dp -= 4; |
png_memcpy(dp, v, 4); |
} |
} |
} |
else if (width) // pass == 4 or 5 |
{ |
int width_mmx = ((width >> 1) << 1) ; |
width -= width_mmx; |
if (width_mmx) |
{ |
_asm |
{ |
mov esi, sptr |
mov edi, dp |
mov ecx, width_mmx |
sub esi, 4 |
sub edi, 12 |
loop4_pass4: |
movq mm0, [esi] ; v3 v2 v1 v0 v7 v6 v5 v4 |
movq mm1, mm0 ; v3 v2 v1 v0 v7 v6 v5 v4 |
punpckldq mm0, mm0 ; v7 v6 v5 v4 v7 v6 v5 v4 |
punpckhdq mm1, mm1 ; v3 v2 v1 v0 v3 v2 v1 v0 |
movq [edi], mm0 |
sub esi, 8 |
movq [edi + 8], mm1 |
sub edi, 16 |
sub ecx, 2 |
jnz loop4_pass4 |
EMMS |
} |
} |
sptr -= (width_mmx*4 - 4); // sign fixed |
dp -= (width_mmx*8 - 4); // sign fixed |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
sptr -= 4; |
png_memcpy(v, sptr, 4); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
dp -= 4; |
png_memcpy(dp, v, 4); |
} |
} |
} |
} /* end of pixel_bytes == 4 */ |
else if (pixel_bytes == 6) |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, 6); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, 6); |
dp -= 6; |
} |
sptr -= 6; |
} |
} /* end of pixel_bytes == 6 */ |
else |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, pixel_bytes); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, pixel_bytes); |
dp -= pixel_bytes; |
} |
sptr-= pixel_bytes; |
} |
} |
} /* end of mmx_supported */ |
else /* MMX not supported: use modified C code - takes advantage |
* of inlining of memcpy for a constant */ |
{ |
if (pixel_bytes == 1) |
{ |
for (i = width; i; i--) |
{ |
int j; |
for (j = 0; j < png_pass_inc[pass]; j++) |
*dp-- = *sptr; |
sptr--; |
} |
} |
else if (pixel_bytes == 3) |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, pixel_bytes); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, pixel_bytes); |
dp -= pixel_bytes; |
} |
sptr -= pixel_bytes; |
} |
} |
else if (pixel_bytes == 2) |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, pixel_bytes); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, pixel_bytes); |
dp -= pixel_bytes; |
} |
sptr -= pixel_bytes; |
} |
} |
else if (pixel_bytes == 4) |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, pixel_bytes); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, pixel_bytes); |
dp -= pixel_bytes; |
} |
sptr -= pixel_bytes; |
} |
} |
else if (pixel_bytes == 6) |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, pixel_bytes); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, pixel_bytes); |
dp -= pixel_bytes; |
} |
sptr -= pixel_bytes; |
} |
} |
else |
{ |
for (i = width; i; i--) |
{ |
png_byte v[8]; |
int j; |
png_memcpy(v, sptr, pixel_bytes); |
for (j = 0; j < png_pass_inc[pass]; j++) |
{ |
png_memcpy(dp, v, pixel_bytes); |
dp -= pixel_bytes; |
} |
sptr -= pixel_bytes; |
} |
} |
} /* end of MMX not supported */ |
break; |
} |
} /* end switch (row_info->pixel_depth) */ |
row_info->width = final_width; |
row_info->rowbytes = ((final_width * |
(png_uint_32)row_info->pixel_depth + 7) >> 3); |
} |
} |
#endif /* PNG_READ_INTERLACING_SUPPORTED */ |
// These variables are utilized in the functions below. They are declared |
// globally here to ensure alignment on 8-byte boundaries. |
union uAll { |
__int64 use; |
double align; |
} LBCarryMask = {0x0101010101010101}, |
HBClearMask = {0x7f7f7f7f7f7f7f7f}, |
ActiveMask, ActiveMask2, ActiveMaskEnd, ShiftBpp, ShiftRem; |
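// (The double member itself is never read; it is only there so the |
// compiler gives each union 8-byte alignment, letting movq load the |
// 64-bit .use member directly from memory.) |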
// Optimized code for PNG Average filter decoder |
void /* PRIVATE */ |
png_read_filter_row_mmx_avg(png_row_infop row_info, png_bytep row |
, png_bytep prev_row) |
{ |
int bpp; |
png_uint_32 FullLength; |
png_uint_32 MMXLength; |
//png_uint_32 len; |
int diff; |
bpp = (row_info->pixel_depth + 7) >> 3; // Get # bytes per pixel |
FullLength = row_info->rowbytes; // # of bytes to filter |
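// Scalar reference for what this routine computes (a sketch only, not |
// compiled here; it restates the formulas in the asm comments below): |
// |
//    png_uint_32 x; |
//    for (x = 0; x < FullLength; x++) |
//    { |
//       int left = (x < (png_uint_32)bpp) ? 0 : row[x - bpp]; |
//       row[x] = (png_byte)((row[x] + ((left + prev_row[x]) >> 1)) & 0xff); |
//    } |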
_asm { |
// Init address pointers and offset |
mov edi, row // edi ==> Avg(x) |
xor ebx, ebx // ebx ==> x |
mov edx, edi |
mov esi, prev_row // esi ==> Prior(x) |
sub edx, bpp // edx ==> Raw(x-bpp) |
xor eax, eax |
// Compute the Raw value for the first bpp bytes |
// Raw(x) = Avg(x) + (Prior(x)/2) |
davgrlp: |
mov al, [esi + ebx] // Load al with Prior(x) |
inc ebx |
shr al, 1 // divide by 2 |
add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx |
cmp ebx, bpp |
mov [edi+ebx-1], al // Write back Raw(x); |
// mov does not affect flags; -1 to offset inc ebx |
jb davgrlp |
// get # of bytes to alignment |
mov diff, edi // take start of row |
add diff, ebx // add bpp |
add diff, 0xf // add 7 + 8 to incr past alignment boundary |
and diff, 0xfffffff8 // mask to alignment boundary |
sub diff, edi // subtract from start ==> value ebx at alignment |
jz davggo |
// fix alignment |
// Compute the Raw value for the bytes up to the alignment boundary |
// Raw(x) = Avg(x) + ((Raw(x-bpp) + Prior(x))/2) |
xor ecx, ecx |
davglp1: |
xor eax, eax |
mov cl, [esi + ebx] // load cl with Prior(x) |
mov al, [edx + ebx] // load al with Raw(x-bpp) |
add ax, cx |
inc ebx |
shr ax, 1 // divide by 2 |
add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx |
cmp ebx, diff // Check if at alignment boundary |
mov [edi+ebx-1], al // Write back Raw(x); |
// mov does not affect flags; -1 to offset inc ebx |
jb davglp1 // Repeat until at alignment boundary |
davggo: |
mov eax, FullLength |
mov ecx, eax |
sub eax, ebx // subtract alignment fix |
and eax, 0x00000007 // calc bytes over mult of 8 |
sub ecx, eax // drop over bytes from original length |
mov MMXLength, ecx |
} // end _asm block |
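// At this point diff is the offset of the first 8-byte-aligned byte in the |
// row (everything before it has been filtered above), and MMXLength is |
// diff plus the largest multiple of 8 bytes that fits in the remaining |
// (FullLength - diff) bytes.  Most branches of the switch below decode |
// [diff, MMXLength) with a bpp-specific MMX loop (bpp == 1 uses a plain |
// byte loop and returns early); the final _asm block then decodes the |
// leftover (FullLength - MMXLength) bytes one at a time. |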
// Now do the math for the rest of the row |
switch ( bpp ) |
{ |
case 3: |
{ |
ActiveMask.use = 0x0000000000ffffff; |
ShiftBpp.use = 24; // == 3 * 8 |
ShiftRem.use = 40; // == 64 - 24 |
_asm { |
// Re-init address pointers and offset |
movq mm7, ActiveMask |
mov ebx, diff // ebx ==> x = offset to alignment boundary |
movq mm5, LBCarryMask |
mov edi, row // edi ==> Avg(x) |
movq mm4, HBClearMask |
mov esi, prev_row // esi ==> Prior(x) |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes |
// (we correct position in loop below) |
davg3lp: |
movq mm0, [edi + ebx] // Load mm0 with Avg(x) |
// Add (Prev_row/2) to Average |
movq mm3, mm5 |
psrlq mm2, ShiftRem // Correct position Raw(x-bpp) data |
movq mm1, [esi + ebx] // Load mm1 with Prior(x) |
movq mm6, mm7 |
pand mm3, mm1 // get lsb for each prev_row byte |
psrlq mm1, 1 // divide prev_row bytes by 2 |
pand mm1, mm4 // clear invalid bit 7 of each byte |
paddb mm0, mm1 // add (Prev_row/2) to Avg for each byte |
// Add 1st active group (Raw(x-bpp)/2) to Average with LBCarry |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 1 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active |
// byte |
// Add 2nd active group (Raw(x-bpp)/2) to Average with LBCarry |
psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 3-5 |
movq mm2, mm0 // mov updated Raws to mm2 |
psllq mm2, ShiftBpp // shift data to position correctly |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active |
// byte |
// Add 3rd active group (Raw(x-bpp)/2) to Average with LBCarry |
psllq mm6, ShiftBpp // shift the mm6 mask to cover the last two |
// bytes |
movq mm2, mm0 // mov updated Raws to mm2 |
psllq mm2, ShiftBpp // shift data to position correctly |
// Data only needs to be shifted once here to |
// get the correct x-bpp offset. |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg |
add ebx, 8 |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active |
// byte |
// Now ready to write back to memory |
movq [edi + ebx - 8], mm0 |
// Move updated Raw(x) to use as Raw(x-bpp) for next loop |
cmp ebx, MMXLength |
movq mm2, mm0 // mov updated Raw(x) to mm2 |
jb davg3lp |
} // end _asm block |
} |
break; |
case 6: |
case 4: |
case 7: |
case 5: |
{ |
ActiveMask.use = 0xffffffffffffffff; // use shift below to clear |
// appropriate inactive bytes |
ShiftBpp.use = bpp << 3; |
ShiftRem.use = 64 - ShiftBpp.use; |
_asm { |
movq mm4, HBClearMask |
// Re-init address pointers and offset |
mov ebx, diff // ebx ==> x = offset to alignment boundary |
// Load ActiveMask and clear all bytes except for 1st active group |
movq mm7, ActiveMask |
mov edi, row // edi ==> Avg(x) |
psrlq mm7, ShiftRem |
mov esi, prev_row // esi ==> Prior(x) |
movq mm6, mm7 |
movq mm5, LBCarryMask |
psllq mm6, ShiftBpp // Create mask for 2nd active group |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes |
// (we correct position in loop below) |
davg4lp: |
movq mm0, [edi + ebx] |
psrlq mm2, ShiftRem // shift data to position correctly |
movq mm1, [esi + ebx] |
// Add (Prev_row/2) to Average |
movq mm3, mm5 |
pand mm3, mm1 // get lsb for each prev_row byte |
psrlq mm1, 1 // divide prev_row bytes by 2 |
pand mm1, mm4 // clear invalid bit 7 of each byte |
paddb mm0, mm1 // add (Prev_row/2) to Avg for each byte |
// Add 1st active group (Raw(x-bpp)/2) to Average with LBCarry |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm7 // Leave only Active Group 1 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active |
// byte |
// Add 2nd active group (Raw(x-bpp)/2) to Average with LBCarry |
movq mm2, mm0 // mov updated Raws to mm2 |
psllq mm2, ShiftBpp // shift data to position correctly |
add ebx, 8 |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active |
// byte |
cmp ebx, MMXLength |
// Now ready to write back to memory |
movq [edi + ebx - 8], mm0 |
// Prep Raw(x-bpp) for next loop |
movq mm2, mm0 // mov updated Raws to mm2 |
jb davg4lp |
} // end _asm block |
} |
break; |
case 2: |
{ |
ActiveMask.use = 0x000000000000ffff; |
ShiftBpp.use = 16; // == 2 * 8 [BUGFIX] |
ShiftRem.use = 48; // == 64 - 16 [BUGFIX] |
_asm { |
// Load ActiveMask |
movq mm7, ActiveMask |
// Re-init address pointers and offset |
mov ebx, diff // ebx ==> x = offset to alignment boundary |
movq mm5, LBCarryMask |
mov edi, row // edi ==> Avg(x) |
movq mm4, HBClearMask |
mov esi, prev_row // esi ==> Prior(x) |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes |
// (we correct position in loop below) |
davg2lp: |
movq mm0, [edi + ebx] |
psrlq mm2, ShiftRem // shift data to position correctly [BUGFIX] |
movq mm1, [esi + ebx] |
// Add (Prev_row/2) to Average |
movq mm3, mm5 |
pand mm3, mm1 // get lsb for each prev_row byte |
psrlq mm1, 1 // divide prev_row bytes by 2 |
pand mm1, mm4 // clear invalid bit 7 of each byte |
movq mm6, mm7 |
paddb mm0, mm1 // add (Prev_row/2) to Avg for each byte |
// Add 1st active group (Raw(x-bpp)/2) to Average with LBCarry |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 1 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte |
// Add 2nd active group (Raw(x-bpp)/2) to Average with LBCarry |
psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 2 & 3 |
movq mm2, mm0 // mov updated Raws to mm2 |
psllq mm2, ShiftBpp // shift data to position correctly |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte |
// Add 3rd active group (Raw(x-bpp)/2) to Average with LBCarry |
psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 4 & 5 |
movq mm2, mm0 // mov updated Raws to mm2 |
psllq mm2, ShiftBpp // shift data to position correctly |
// Data only needs to be shifted once here to |
// get the correct x-bpp offset. |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte |
// Add 4th active group (Raw(x-bpp)/2) to Average with LBCarry |
psllq mm6, ShiftBpp // shift the mm6 mask to cover bytes 6 & 7 |
movq mm2, mm0 // mov updated Raws to mm2 |
psllq mm2, ShiftBpp // shift data to position correctly |
// Data only needs to be shifted once here to |
// get the correct x-bpp offset. |
add ebx, 8 |
movq mm1, mm3 // now use mm1 for getting LBCarrys |
pand mm1, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 (Only valid for active group) |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm2, mm1 // add LBCarrys to (Raw(x-bpp)/2) for each byte |
pand mm2, mm6 // Leave only Active Group 2 bytes to add to Avg |
paddb mm0, mm2 // add (Raw/2) + LBCarrys to Avg for each Active byte |
cmp ebx, MMXLength |
// Now ready to write back to memory |
movq [edi + ebx - 8], mm0 |
// Prep Raw(x-bpp) for next loop |
movq mm2, mm0 // mov updated Raws to mm2 |
jb davg2lp |
} // end _asm block |
} |
break; |
case 1: // bpp == 1 |
{ |
_asm { |
// Re-init address pointers and offset |
mov ebx, diff // ebx ==> x = offset to alignment boundary |
mov edi, row // edi ==> Avg(x) |
cmp ebx, FullLength // Test if offset at end of array |
jnb davg1end |
// Do Avg decode for remaining bytes |
mov esi, prev_row // esi ==> Prior(x) |
mov edx, edi |
xor ecx, ecx // zero ecx before using cl & cx in loop below |
sub edx, bpp // edx ==> Raw(x-bpp) |
davg1lp: |
// Raw(x) = Avg(x) + ((Raw(x-bpp) + Prior(x))/2) |
xor eax, eax |
mov cl, [esi + ebx] // load cl with Prior(x) |
mov al, [edx + ebx] // load al with Raw(x-bpp) |
add ax, cx |
inc ebx |
shr ax, 1 // divide by 2 |
add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx |
cmp ebx, FullLength // Check if at end of array |
mov [edi+ebx-1], al // Write back Raw(x); |
// mov does not affect flags; -1 to offset inc ebx |
jb davg1lp |
davg1end: |
} // end _asm block |
} |
return; |
case 8: // bpp == 8 |
{ |
_asm { |
// Re-init address pointers and offset |
mov ebx, diff // ebx ==> x = offset to alignment boundary |
movq mm5, LBCarryMask |
mov edi, row // edi ==> Avg(x) |
movq mm4, HBClearMask |
mov esi, prev_row // esi ==> Prior(x) |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes |
// (NO NEED to correct position in loop below) |
davg8lp: |
movq mm0, [edi + ebx] |
movq mm3, mm5 |
movq mm1, [esi + ebx] |
add ebx, 8 |
pand mm3, mm1 // get lsb for each prev_row byte |
psrlq mm1, 1 // divide prev_row bytes by 2 |
pand mm3, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm1, mm4 // clear invalid bit 7 of each byte |
paddb mm0, mm3 // add LBCarrys to Avg for each byte |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm0, mm1 // add (Prev_row/2) to Avg for each byte |
paddb mm0, mm2 // add (Raw/2) to Avg for each byte |
cmp ebx, MMXLength |
movq [edi + ebx - 8], mm0 |
movq mm2, mm0 // reuse as Raw(x-bpp) |
jb davg8lp |
} // end _asm block |
} |
break; |
default: // bpp greater than 8 |
{ |
_asm { |
movq mm5, LBCarryMask |
// Re-init address pointers and offset |
mov ebx, diff // ebx ==> x = offset to alignment boundary |
mov edi, row // edi ==> Avg(x) |
movq mm4, HBClearMask |
mov edx, edi |
mov esi, prev_row // esi ==> Prior(x) |
sub edx, bpp // edx ==> Raw(x-bpp) |
davgAlp: |
movq mm0, [edi + ebx] |
movq mm3, mm5 |
movq mm1, [esi + ebx] |
pand mm3, mm1 // get lsb for each prev_row byte |
movq mm2, [edx + ebx] |
psrlq mm1, 1 // divide prev_row bytes by 2 |
pand mm3, mm2 // get LBCarrys for each byte where both |
// lsb's were == 1 |
psrlq mm2, 1 // divide raw bytes by 2 |
pand mm1, mm4 // clear invalid bit 7 of each byte |
paddb mm0, mm3 // add LBCarrys to Avg for each byte |
pand mm2, mm4 // clear invalid bit 7 of each byte |
paddb mm0, mm1 // add (Prev_row/2) to Avg for each byte |
add ebx, 8 |
paddb mm0, mm2 // add (Raw/2) to Avg for each byte |
cmp ebx, MMXLength |
movq [edi + ebx - 8], mm0 |
jb davgAlp |
} // end _asm block |
} |
break; |
} // end switch ( bpp ) |
_asm { |
// MMX acceleration complete now do clean-up |
// Check if any remaining bytes left to decode |
mov ebx, MMXLength // ebx ==> x = offset bytes remaining after MMX |
mov edi, row // edi ==> Avg(x) |
cmp ebx, FullLength // Test if offset at end of array |
jnb davgend |
// Do Avg decode for remaining bytes |
mov esi, prev_row // esi ==> Prior(x) |
mov edx, edi |
xor ecx, ecx // zero ecx before using cl & cx in loop below |
sub edx, bpp // edx ==> Raw(x-bpp) |
davglp2: |
// Raw(x) = Avg(x) + ((Raw(x-bpp) + Prior(x))/2) |
xor eax, eax |
mov cl, [esi + ebx] // load cl with Prior(x) |
mov al, [edx + ebx] // load al with Raw(x-bpp) |
add ax, cx |
inc ebx |
shr ax, 1 // divide by 2 |
add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx |
cmp ebx, FullLength // Check if at end of array |
mov [edi+ebx-1], al // Write back Raw(x); |
// mov does not affect flags; -1 to offset inc ebx |
jb davglp2 |
davgend: |
emms // End MMX instructions; prep for possible FP instrs. |
} // end _asm block |
} |
// Optimized code for PNG Paeth filter decoder |
void /* PRIVATE */ |
png_read_filter_row_mmx_paeth(png_row_infop row_info, png_bytep row, |
png_bytep prev_row) |
{ |
png_uint_32 FullLength; |
png_uint_32 MMXLength; |
//png_uint_32 len; |
int bpp; |
int diff; |
//int ptemp; |
int patemp, pbtemp, pctemp; |
bpp = (row_info->pixel_depth + 7) >> 3; // Get # bytes per pixel |
FullLength = row_info->rowbytes; // # of bytes to filter |
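// Scalar reference for the Paeth predictor used below (a sketch only, not |
// compiled here).  With a = Raw(x-bpp), b = Prior(x), c = Prior(x-bpp): |
// |
//    p  = a + b - c; |
//    pa = abs(p - a);              // == abs(b - c) |
//    pb = abs(p - b);              // == abs(a - c) |
//    pc = abs(p - c); |
//    pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c; |
//    Raw(x) = (png_byte)((Paeth(x) + pred) & 0xff); |
// |
// For x < bpp, a = c = 0, so the predictor is simply Prior(x). |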
_asm |
{ |
xor ebx, ebx // ebx ==> x offset |
mov edi, row |
xor edx, edx // edx ==> x-bpp offset |
mov esi, prev_row |
xor eax, eax |
// Compute the Raw value for the first bpp bytes |
// Note: for x < bpp, a = c = 0, so the Paeth predictor is Prior(x) and |
// the decode reduces to Raw(x) = Paeth(x) + Prior(x) |
dpthrlp: |
mov al, [edi + ebx] |
add al, [esi + ebx] |
inc ebx |
cmp ebx, bpp |
mov [edi + ebx - 1], al |
jb dpthrlp |
// get # of bytes to alignment |
mov diff, edi // take start of row |
add diff, ebx // add bpp |
xor ecx, ecx |
add diff, 0xf // add 7 + 8 to incr past alignment boundary |
and diff, 0xfffffff8 // mask to alignment boundary |
sub diff, edi // subtract from start ==> value ebx at alignment |
jz dpthgo |
// fix alignment |
dpthlp1: |
xor eax, eax |
// pav = p - a = (a + b - c) - a = b - c |
mov al, [esi + ebx] // load Prior(x) into al |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
sub eax, ecx // subtract Prior(x-bpp) |
mov patemp, eax // Save pav for later use |
xor eax, eax |
// pbv = p - b = (a + b - c) - b = a - c |
mov al, [edi + edx] // load Raw(x-bpp) into al |
sub eax, ecx // subtract Prior(x-bpp) |
mov ecx, eax |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
add eax, patemp // pcv = pav + pbv |
// pc = abs(pcv) |
test eax, 0x80000000 |
jz dpthpca |
neg eax // reverse sign of neg values |
dpthpca: |
mov pctemp, eax // save pc for later use |
// pb = abs(pbv) |
test ecx, 0x80000000 |
jz dpthpba |
neg ecx // reverse sign of neg values |
dpthpba: |
mov pbtemp, ecx // save pb for later use |
// pa = abs(pav) |
mov eax, patemp |
test eax, 0x80000000 |
jz dpthpaa |
neg eax // reverse sign of neg values |
dpthpaa: |
mov patemp, eax // save pa for later use |
// test if pa <= pb |
cmp eax, ecx |
jna dpthabb |
// pa > pb; now test if pb <= pc |
cmp ecx, pctemp |
jna dpthbbc |
// pb > pc; Raw(x) = Paeth(x) + Prior(x-bpp) |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
jmp dpthpaeth |
dpthbbc: |
// pb <= pc; Raw(x) = Paeth(x) + Prior(x) |
mov cl, [esi + ebx] // load Prior(x) into cl |
jmp dpthpaeth |
dpthabb: |
// pa <= pb; now test if pa <= pc |
cmp eax, pctemp |
jna dpthabc |
// pa > pc; Raw(x) = Paeth(x) + Prior(x-bpp) |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
jmp dpthpaeth |
dpthabc: |
// pa <= pc; Raw(x) = Paeth(x) + Raw(x-bpp) |
mov cl, [edi + edx] // load Raw(x-bpp) into cl |
dpthpaeth: |
inc ebx |
inc edx |
// Raw(x) = (Paeth(x) + Paeth_Predictor( a, b, c )) mod 256 |
add [edi + ebx - 1], cl |
cmp ebx, diff |
jb dpthlp1 |
dpthgo: |
mov ecx, FullLength |
mov eax, ecx |
sub eax, ebx // subtract alignment fix |
and eax, 0x00000007 // calc bytes over mult of 8 |
sub ecx, eax // drop over bytes from original length |
mov MMXLength, ecx |
} // end _asm block |
// Now do the math for the rest of the row |
switch ( bpp ) |
{ |
case 3: |
{ |
ActiveMask.use = 0x0000000000ffffff; |
ActiveMaskEnd.use = 0xffff000000000000; |
ShiftBpp.use = 24; // == bpp(3) * 8 |
ShiftRem.use = 40; // == 64 - 24 |
_asm |
{ |
mov ebx, diff |
mov edi, row |
mov esi, prev_row |
pxor mm0, mm0 |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm1, [edi+ebx-8] |
dpth3lp: |
psrlq mm1, ShiftRem // shift last 3 bytes to 1st 3 bytes |
movq mm2, [esi + ebx] // load b=Prior(x) |
punpcklbw mm1, mm0 // Unpack High bytes of a |
movq mm3, [esi+ebx-8] // Prep c=Prior(x-bpp) bytes |
punpcklbw mm2, mm0 // Unpack High bytes of b |
psrlq mm3, ShiftRem // shift last 3 bytes to 1st 3 bytes |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
punpcklbw mm3, mm0 // Unpack High bytes of c |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
paddw mm7, mm3 |
pxor mm0, mm0 |
packuswb mm7, mm1 |
movq mm3, [esi + ebx] // load c=Prior(x-bpp) |
pand mm7, ActiveMask |
movq mm2, mm3 // load b=Prior(x) step 1 |
paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x) |
punpcklbw mm3, mm0 // Unpack High bytes of c |
movq [edi + ebx], mm7 // write back updated value |
movq mm1, mm7 // Now mm1 will be used as Raw(x-bpp) |
// Now do Paeth for 2nd set of bytes (3-5) |
psrlq mm2, ShiftBpp // load b=Prior(x) step 2 |
punpcklbw mm1, mm0 // Unpack High bytes of a |
pxor mm7, mm7 |
punpcklbw mm2, mm0 // Unpack High bytes of b |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
psubw mm5, mm3 |
psubw mm4, mm3 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = |
// pav + pbv = pbv + pav |
movq mm6, mm5 |
paddw mm6, mm4 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm5 // Create mask pbv bytes < 0 |
pcmpgtw mm7, mm4 // Create mask pav bytes < 0 |
pand mm0, mm5 // Only pbv bytes < 0 in mm0 |
pand mm7, mm4 // Only pav bytes < 0 in mm7 |
psubw mm5, mm0 |
psubw mm4, mm7 |
psubw mm5, mm0 |
psubw mm4, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
movq mm2, [esi + ebx] // load b=Prior(x) |
pand mm3, mm7 |
pandn mm7, mm0 |
pxor mm1, mm1 |
paddw mm7, mm3 |
pxor mm0, mm0 |
packuswb mm7, mm1 |
movq mm3, mm2 // load c=Prior(x-bpp) step 1 |
pand mm7, ActiveMask |
punpckhbw mm2, mm0 // Unpack High bytes of b |
psllq mm7, ShiftBpp // Shift bytes to 2nd group of 3 bytes |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x) |
psllq mm3, ShiftBpp // load c=Prior(x-bpp) step 2 |
movq [edi + ebx], mm7 // write back updated value |
movq mm1, mm7 |
punpckhbw mm3, mm0 // Unpack High bytes of c |
psllq mm1, ShiftBpp // Shift bytes |
// Now mm1 will be used as Raw(x-bpp) |
// Now do Paeth for 3rd, and final, set of bytes (6-7) |
pxor mm7, mm7 |
punpckhbw mm1, mm0 // Unpack High bytes of a |
psubw mm4, mm3 |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
pxor mm0, mm0 |
paddw mm6, mm5 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
pandn mm0, mm1 |
pandn mm7, mm4 |
paddw mm0, mm2 |
paddw mm7, mm5 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pand mm3, mm7 |
pandn mm7, mm0 |
paddw mm7, mm3 |
pxor mm1, mm1 |
packuswb mm1, mm7 |
// Step ebx to next set of 8 bytes and repeat loop until done |
add ebx, 8 |
pand mm1, ActiveMaskEnd |
paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x) |
cmp ebx, MMXLength |
pxor mm0, mm0 // pxor does not affect flags |
movq [edi + ebx - 8], mm1 // write back updated value |
// mm1 will be used as Raw(x-bpp) next loop |
// mm3 ready to be used as Prior(x-bpp) next loop |
jb dpth3lp |
} // end _asm block |
} |
break; |
case 6: |
case 7: |
case 5: |
{ |
ActiveMask.use = 0x00000000ffffffff; |
ActiveMask2.use = 0xffffffff00000000; |
ShiftBpp.use = bpp << 3; // == bpp * 8 |
ShiftRem.use = 64 - ShiftBpp.use; |
_asm |
{ |
mov ebx, diff |
mov edi, row |
mov esi, prev_row |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm1, [edi+ebx-8] |
pxor mm0, mm0 |
dpth6lp: |
// Must shift to position Raw(x-bpp) data |
psrlq mm1, ShiftRem |
// Do first set of 4 bytes |
movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes |
punpcklbw mm1, mm0 // Unpack Low bytes of a |
movq mm2, [esi + ebx] // load b=Prior(x) |
punpcklbw mm2, mm0 // Unpack Low bytes of b |
// Must shift to position Prior(x-bpp) data |
psrlq mm3, ShiftRem |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
punpcklbw mm3, mm0 // Unpack Low bytes of c |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
paddw mm7, mm3 |
pxor mm0, mm0 |
packuswb mm7, mm1 |
movq mm3, [esi + ebx - 8] // load c=Prior(x-bpp) |
pand mm7, ActiveMask |
psrlq mm3, ShiftRem |
movq mm2, [esi + ebx] // load b=Prior(x) step 1 |
paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x) |
movq mm6, mm2 |
movq [edi + ebx], mm7 // write back updated value |
movq mm1, [edi+ebx-8] |
psllq mm6, ShiftBpp |
movq mm5, mm7 |
psrlq mm1, ShiftRem |
por mm3, mm6 |
psllq mm5, ShiftBpp |
punpckhbw mm3, mm0 // Unpack High bytes of c |
por mm1, mm5 |
// Do second set of 4 bytes |
punpckhbw mm2, mm0 // Unpack High bytes of b |
punpckhbw mm1, mm0 // Unpack High bytes of a |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
pxor mm1, mm1 |
paddw mm7, mm3 |
pxor mm0, mm0 |
// Step ebx to next set of 8 bytes and repeat loop until done |
add ebx, 8 |
packuswb mm1, mm7 |
paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x) |
cmp ebx, MMXLength |
movq [edi + ebx - 8], mm1 // write back updated value |
// mm1 will be used as Raw(x-bpp) next loop |
jb dpth6lp |
} // end _asm block |
} |
break; |
case 4: |
{ |
ActiveMask.use = 0x00000000ffffffff; |
_asm { |
mov ebx, diff |
mov edi, row |
mov esi, prev_row |
pxor mm0, mm0 |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm1, [edi+ebx-8] // Only time should need to read |
// a=Raw(x-bpp) bytes |
dpth4lp: |
// Do first set of 4 bytes |
movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes |
punpckhbw mm1, mm0 // Unpack Low bytes of a |
movq mm2, [esi + ebx] // load b=Prior(x) |
punpcklbw mm2, mm0 // Unpack High bytes of b |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
punpckhbw mm3, mm0 // Unpack High bytes of c |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
paddw mm7, mm3 |
pxor mm0, mm0 |
packuswb mm7, mm1 |
movq mm3, [esi + ebx] // load c=Prior(x-bpp) |
pand mm7, ActiveMask |
movq mm2, mm3 // load b=Prior(x) step 1 |
paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x) |
punpcklbw mm3, mm0 // Unpack High bytes of c |
movq [edi + ebx], mm7 // write back updated value |
movq mm1, mm7 // Now mm1 will be used as Raw(x-bpp) |
// Do second set of 4 bytes |
punpckhbw mm2, mm0 // Unpack Low bytes of b |
punpcklbw mm1, mm0 // Unpack Low bytes of a |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
pxor mm1, mm1 |
paddw mm7, mm3 |
pxor mm0, mm0 |
// Step ebx to next set of 8 bytes and repeat loop until done |
add ebx, 8 |
packuswb mm1, mm7 |
paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x) |
cmp ebx, MMXLength |
movq [edi + ebx - 8], mm1 // write back updated value |
// mm1 will be used as Raw(x-bpp) next loop |
jb dpth4lp |
} // end _asm block |
} |
break; |
case 8: // bpp == 8 |
{ |
ActiveMask.use = 0x00000000ffffffff; |
_asm { |
mov ebx, diff |
mov edi, row |
mov esi, prev_row |
pxor mm0, mm0 |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm1, [edi+ebx-8] // Only time should need to read |
// a=Raw(x-bpp) bytes |
dpth8lp: |
// Do first set of 4 bytes |
movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes |
punpcklbw mm1, mm0 // Unpack Low bytes of a |
movq mm2, [esi + ebx] // load b=Prior(x) |
punpcklbw mm2, mm0 // Unpack Low bytes of b |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
punpcklbw mm3, mm0 // Unpack Low bytes of c |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
paddw mm7, mm3 |
pxor mm0, mm0 |
packuswb mm7, mm1 |
movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes |
pand mm7, ActiveMask |
movq mm2, [esi + ebx] // load b=Prior(x) |
paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x) |
punpckhbw mm3, mm0 // Unpack High bytes of c |
movq [edi + ebx], mm7 // write back updated value |
movq mm1, [edi+ebx-8] // read a=Raw(x-bpp) bytes |
// Do second set of 4 bytes |
punpckhbw mm2, mm0 // Unpack High bytes of b |
punpckhbw mm1, mm0 // Unpack High bytes of a |
// pav = p - a = (a + b - c) - a = b - c |
movq mm4, mm2 |
// pbv = p - b = (a + b - c) - b = a - c |
movq mm5, mm1 |
psubw mm4, mm3 |
pxor mm7, mm7 |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
movq mm6, mm4 |
psubw mm5, mm3 |
// pa = abs(p-a) = abs(pav) |
// pb = abs(p-b) = abs(pbv) |
// pc = abs(p-c) = abs(pcv) |
pcmpgtw mm0, mm4 // Create mask pav bytes < 0 |
paddw mm6, mm5 |
pand mm0, mm4 // Only pav bytes < 0 in mm7 |
pcmpgtw mm7, mm5 // Create mask pbv bytes < 0 |
psubw mm4, mm0 |
pand mm7, mm5 // Only pbv bytes < 0 in mm0 |
psubw mm4, mm0 |
psubw mm5, mm7 |
pxor mm0, mm0 |
pcmpgtw mm0, mm6 // Create mask pcv bytes < 0 |
pand mm0, mm6 // Only pav bytes < 0 in mm7 |
psubw mm5, mm7 |
psubw mm6, mm0 |
// test pa <= pb |
movq mm7, mm4 |
psubw mm6, mm0 |
pcmpgtw mm7, mm5 // pa > pb? |
movq mm0, mm7 |
// use mm7 mask to merge pa & pb |
pand mm5, mm7 |
// use mm0 mask copy to merge a & b |
pand mm2, mm0 |
pandn mm7, mm4 |
pandn mm0, mm1 |
paddw mm7, mm5 |
paddw mm0, mm2 |
// test ((pa <= pb)? pa:pb) <= pc |
pcmpgtw mm7, mm6 // pab > pc? |
pxor mm1, mm1 |
pand mm3, mm7 |
pandn mm7, mm0 |
pxor mm1, mm1 |
paddw mm7, mm3 |
pxor mm0, mm0 |
// Step ebx to next set of 8 bytes and repeat loop until done |
add ebx, 8 |
packuswb mm1, mm7 |
paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x) |
cmp ebx, MMXLength |
movq [edi + ebx - 8], mm1 // write back updated value |
// mm1 will be used as Raw(x-bpp) next loop |
jb dpth8lp |
} // end _asm block |
} |
break; |
case 1: // bpp = 1 |
case 2: // bpp = 2 |
default: // bpp > 8 |
{ |
_asm { |
mov ebx, diff |
cmp ebx, FullLength |
jnb dpthdend |
mov edi, row |
mov esi, prev_row |
// Do Paeth decode for remaining bytes |
mov edx, ebx |
xor ecx, ecx // zero ecx before using cl & cx in loop below |
sub edx, bpp // Set edx = ebx - bpp |
dpthdlp: |
xor eax, eax |
// pav = p - a = (a + b - c) - a = b - c |
mov al, [esi + ebx] // load Prior(x) into al |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
sub eax, ecx // subtract Prior(x-bpp) |
mov patemp, eax // Save pav for later use |
xor eax, eax |
// pbv = p - b = (a + b - c) - b = a - c |
mov al, [edi + edx] // load Raw(x-bpp) into al |
sub eax, ecx // subtract Prior(x-bpp) |
mov ecx, eax |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
add eax, patemp // pcv = pav + pbv |
// pc = abs(pcv) |
test eax, 0x80000000 |
jz dpthdpca |
neg eax // reverse sign of neg values |
dpthdpca: |
mov pctemp, eax // save pc for later use |
// pb = abs(pbv) |
test ecx, 0x80000000 |
jz dpthdpba |
neg ecx // reverse sign of neg values |
dpthdpba: |
mov pbtemp, ecx // save pb for later use |
// pa = abs(pav) |
mov eax, patemp |
test eax, 0x80000000 |
jz dpthdpaa |
neg eax // reverse sign of neg values |
dpthdpaa: |
mov patemp, eax // save pa for later use |
// test if pa <= pb |
cmp eax, ecx |
jna dpthdabb |
// pa > pb; now test if pb <= pc |
cmp ecx, pctemp |
jna dpthdbbc |
// pb > pc; Raw(x) = Paeth(x) + Prior(x-bpp) |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
jmp dpthdpaeth |
dpthdbbc: |
// pb <= pc; Raw(x) = Paeth(x) + Prior(x) |
mov cl, [esi + ebx] // load Prior(x) into cl |
jmp dpthdpaeth |
dpthdabb: |
// pa <= pb; now test if pa <= pc |
cmp eax, pctemp |
jna dpthdabc |
// pa > pc; Raw(x) = Paeth(x) + Prior(x-bpp) |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
jmp dpthdpaeth |
dpthdabc: |
// pa <= pc; Raw(x) = Paeth(x) + Raw(x-bpp) |
mov cl, [edi + edx] // load Raw(x-bpp) into cl |
dpthdpaeth: |
inc ebx |
inc edx |
// Raw(x) = (Paeth(x) + Paeth_Predictor( a, b, c )) mod 256 |
add [edi + ebx - 1], cl |
cmp ebx, FullLength |
jb dpthdlp |
dpthdend: |
} // end _asm block |
} |
return; // No need to go further with this one |
} // end switch ( bpp ) |
_asm |
{ |
// MMX acceleration complete now do clean-up |
// Check if any remaining bytes left to decode |
mov ebx, MMXLength |
cmp ebx, FullLength |
jnb dpthend |
mov edi, row |
mov esi, prev_row |
// Do Paeth decode for remaining bytes |
mov edx, ebx |
xor ecx, ecx // zero ecx before using cl & cx in loop below |
sub edx, bpp // Set edx = ebx - bpp |
dpthlp2: |
xor eax, eax |
// pav = p - a = (a + b - c) - a = b - c |
mov al, [esi + ebx] // load Prior(x) into al |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
sub eax, ecx // subtract Prior(x-bpp) |
mov patemp, eax // Save pav for later use |
xor eax, eax |
// pbv = p - b = (a + b - c) - b = a - c |
mov al, [edi + edx] // load Raw(x-bpp) into al |
sub eax, ecx // subtract Prior(x-bpp) |
mov ecx, eax |
// pcv = p - c = (a + b - c) -c = (a - c) + (b - c) = pav + pbv |
add eax, patemp // pcv = pav + pbv |
// pc = abs(pcv) |
test eax, 0x80000000 |
jz dpthpca2 |
neg eax // reverse sign of neg values |
dpthpca2: |
mov pctemp, eax // save pc for later use |
// pb = abs(pbv) |
test ecx, 0x80000000 |
jz dpthpba2 |
neg ecx // reverse sign of neg values |
dpthpba2: |
mov pbtemp, ecx // save pb for later use |
// pa = abs(pav) |
mov eax, patemp |
test eax, 0x80000000 |
jz dpthpaa2 |
neg eax // reverse sign of neg values |
dpthpaa2: |
mov patemp, eax // save pa for later use |
// test if pa <= pb |
cmp eax, ecx |
jna dpthabb2 |
// pa > pb; now test if pb <= pc |
cmp ecx, pctemp |
jna dpthbbc2 |
// pb > pc; Raw(x) = Paeth(x) + Prior(x-bpp) |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
jmp dpthpaeth2 |
dpthbbc2: |
// pb <= pc; Raw(x) = Paeth(x) + Prior(x) |
mov cl, [esi + ebx] // load Prior(x) into cl |
jmp dpthpaeth2 |
dpthabb2: |
// pa <= pb; now test if pa <= pc |
cmp eax, pctemp |
jna dpthabc2 |
// pa > pc; Raw(x) = Paeth(x) + Prior(x-bpp) |
mov cl, [esi + edx] // load Prior(x-bpp) into cl |
jmp dpthpaeth2 |
dpthabc2: |
// pa <= pc; Raw(x) = Paeth(x) + Raw(x-bpp) |
mov cl, [edi + edx] // load Raw(x-bpp) into cl |
dpthpaeth2: |
inc ebx |
inc edx |
// Raw(x) = (Paeth(x) + Paeth_Predictor( a, b, c )) mod 256 |
add [edi + ebx - 1], cl |
cmp ebx, FullLength |
jb dpthlp2 |
dpthend: |
emms // End MMX instructions; prep for possible FP instrs. |
} // end _asm block |
} |
// Optimized code for PNG Sub filter decoder |
void /* PRIVATE */ |
png_read_filter_row_mmx_sub(png_row_infop row_info, png_bytep row) |
{ |
//int test; |
int bpp; |
png_uint_32 FullLength; |
png_uint_32 MMXLength; |
int diff; |
bpp = (row_info->pixel_depth + 7) >> 3; // Get # bytes per pixel |
FullLength = row_info->rowbytes - bpp; // # of bytes to filter |
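// Scalar reference (the same loop appears, commented out, in case 1 of |
// the switch below): for every byte at offset i >= bpp in the row, |
//    Raw(i) = (png_byte)((Sub(i) + Raw(i - bpp)) & 0xff); |
// the first bpp bytes of the row are already Raw values. |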
_asm { |
mov edi, row |
mov esi, edi // lp = row |
add edi, bpp // rp = row + bpp |
xor eax, eax |
// get # of bytes to alignment |
mov diff, edi // take start of row |
add diff, 0xf // add 7 + 8 to incr past |
// alignment boundary |
xor ebx, ebx |
and diff, 0xfffffff8 // mask to alignment boundary |
sub diff, edi // subtract from start ==> value |
// ebx at alignment |
jz dsubgo |
// fix alignment |
dsublp1: |
mov al, [esi+ebx] |
add [edi+ebx], al |
inc ebx |
cmp ebx, diff |
jb dsublp1 |
dsubgo: |
mov ecx, FullLength |
mov edx, ecx |
sub edx, ebx // subtract alignment fix |
and edx, 0x00000007 // calc bytes over mult of 8 |
sub ecx, edx // drop over bytes from length |
mov MMXLength, ecx |
} // end _asm block |
// Now do the math for the rest of the row |
switch ( bpp ) |
{ |
case 3: |
{ |
ActiveMask.use = 0x0000ffffff000000; |
ShiftBpp.use = 24; // == 3 * 8 |
ShiftRem.use = 40; // == 64 - 24 |
_asm { |
mov edi, row |
movq mm7, ActiveMask // Load ActiveMask for 2nd active byte group |
mov esi, edi // lp = row |
add edi, bpp // rp = row + bpp |
movq mm6, mm7 |
mov ebx, diff |
psllq mm6, ShiftBpp // Move mask in mm6 to cover 3rd active |
// byte group |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm1, [edi+ebx-8] |
dsub3lp: |
psrlq mm1, ShiftRem // Shift data for adding 1st bpp bytes |
// no need for mask; shift clears inactive bytes |
// Add 1st active group |
movq mm0, [edi+ebx] |
paddb mm0, mm1 |
// Add 2nd active group |
movq mm1, mm0 // mov updated Raws to mm1 |
psllq mm1, ShiftBpp // shift data to position correctly |
pand mm1, mm7 // mask to use only 2nd active group |
paddb mm0, mm1 |
// Add 3rd active group |
movq mm1, mm0 // mov updated Raws to mm1 |
psllq mm1, ShiftBpp // shift data to position correctly |
pand mm1, mm6 // mask to use only 3rd active group |
add ebx, 8 |
paddb mm0, mm1 |
cmp ebx, MMXLength |
movq [edi+ebx-8], mm0 // Write updated Raws back to array |
// Prep for doing 1st add at top of loop |
movq mm1, mm0 |
jb dsub3lp |
} // end _asm block |
} |
break; |
case 1: |
{ |
// Placed here just in case this is a duplicate of the |
// non-MMX code for the SUB filter in png_read_filter_row below |
// |
// png_bytep rp; |
// png_bytep lp; |
// png_uint_32 i; |
// bpp = (row_info->pixel_depth + 7) >> 3; |
// for (i = (png_uint_32)bpp, rp = row + bpp, lp = row; |
// i < row_info->rowbytes; i++, rp++, lp++) |
// { |
// *rp = (png_byte)(((int)(*rp) + (int)(*lp)) & 0xff); |
// } |
_asm { |
mov ebx, diff |
mov edi, row |
cmp ebx, FullLength |
jnb dsub1end |
mov esi, edi // lp = row |
xor eax, eax |
add edi, bpp // rp = row + bpp |
dsub1lp: |
mov al, [esi+ebx] |
add [edi+ebx], al |
inc ebx |
cmp ebx, FullLength |
jb dsub1lp |
dsub1end: |
} // end _asm block |
} |
return; |
case 6: |
case 7: |
case 4: |
case 5: |
{ |
ShiftBpp.use = bpp << 3; |
ShiftRem.use = 64 - ShiftBpp.use; |
_asm { |
mov edi, row |
mov ebx, diff |
mov esi, edi // lp = row |
add edi, bpp // rp = row + bpp |
// PRIME the pump (load the first Raw(x-bpp) data set |
movq mm1, [edi+ebx-8] |
dsub4lp: |
psrlq mm1, ShiftRem // Shift data for adding 1st bpp bytes |
// no need for mask; shift clears inactive bytes |
movq mm0, [edi+ebx] |
paddb mm0, mm1 |
// Add 2nd active group |
movq mm1, mm0 // mov updated Raws to mm1 |
psllq mm1, ShiftBpp // shift data to position correctly |
// there is no need for any mask |
// since shift clears inactive bits/bytes |
add ebx, 8 |
paddb mm0, mm1 |
cmp ebx, MMXLength |
movq [edi+ebx-8], mm0 |
movq mm1, mm0 // Prep for doing 1st add at top of loop |
jb dsub4lp |
} // end _asm block |
} |
break; |
case 2: |
{ |
ActiveMask.use = 0x00000000ffff0000; |
ShiftBpp.use = 16; // == 2 * 8 |
ShiftRem.use = 48; // == 64 - 16 |
_asm { |
movq mm7, ActiveMask // Load ActiveMask for 2nd active byte group |
mov ebx, diff |
movq mm6, mm7 |
mov edi, row |
psllq mm6, ShiftBpp // Move mask in mm6 to cover 3rd active |
// byte group |
mov esi, edi // lp = row |
movq mm5, mm6 |
add edi, bpp // rp = row + bpp |
psllq mm5, ShiftBpp // Move mask in mm5 to cover 4th active |
// byte group |
// PRIME the pump (load the first Raw(x-bpp) data set) |
movq mm1, [edi+ebx-8] |
dsub2lp: |
// Add 1st active group |
psrlq mm1, ShiftRem // Shift data for adding 1st bpp bytes |
// no need for mask; shift clears inactive |
// bytes |
movq mm0, [edi+ebx] |
paddb mm0, mm1 |
// Add 2nd active group |
movq mm1, mm0 // mov updated Raws to mm1 |
psllq mm1, ShiftBpp // shift data to position correctly |
pand mm1, mm7 // mask to use only 2nd active group |
paddb mm0, mm1 |
// Add 3rd active group |
movq mm1, mm0 // mov updated Raws to mm1 |
psllq mm1, ShiftBpp // shift data to position correctly |
pand mm1, mm6 // mask to use only 3rd active group |
paddb mm0, mm1 |
// Add 4th active group |
movq mm1, mm0 // mov updated Raws to mm1 |
psllq mm1, ShiftBpp // shift data to position correctly |
pand mm1, mm5 // mask to use only 4th active group |
add ebx, 8 |
paddb mm0, mm1 |
cmp ebx, MMXLength |
movq [edi+ebx-8], mm0 // Write updated Raws back to array |
movq mm1, mm0 // Prep for doing 1st add at top of loop |
jb dsub2lp |
} // end _asm block |
} |
break; |
case 8: |
{ |
_asm { |
mov edi, row |
mov ebx, diff |
mov esi, edi // lp = row |
add edi, bpp // rp = row + bpp |
mov ecx, MMXLength |
movq mm7, [edi+ebx-8] // PRIME the pump (load the first |
// Raw(x-bpp) data set) |
and ecx, 0x0000003f // calc bytes over mult of 64 |
dsub8lp: |
movq mm0, [edi+ebx] // Load Sub(x) for 1st 8 bytes |
paddb mm0, mm7 |
movq mm1, [edi+ebx+8] // Load Sub(x) for 2nd 8 bytes |
movq [edi+ebx], mm0 // Write Raw(x) for 1st 8 bytes |
// Now mm0 will be used as Raw(x-bpp) for |
// the 2nd group of 8 bytes. This will be |
// repeated for each group of 8 bytes with |
// the 8th group being used as the Raw(x-bpp) |
// for the 1st group of the next loop. |
paddb mm1, mm0 |
movq mm2, [edi+ebx+16] // Load Sub(x) for 3rd 8 bytes |
movq [edi+ebx+8], mm1 // Write Raw(x) for 2nd 8 bytes |
paddb mm2, mm1 |
movq mm3, [edi+ebx+24] // Load Sub(x) for 4th 8 bytes |
movq [edi+ebx+16], mm2 // Write Raw(x) for 3rd 8 bytes |
paddb mm3, mm2 |
movq mm4, [edi+ebx+32] // Load Sub(x) for 5th 8 bytes |
movq [edi+ebx+24], mm3 // Write Raw(x) for 4th 8 bytes |
paddb mm4, mm3 |
movq mm5, [edi+ebx+40] // Load Sub(x) for 6th 8 bytes |
movq [edi+ebx+32], mm4 // Write Raw(x) for 5th 8 bytes |
paddb mm5, mm4 |
movq mm6, [edi+ebx+48] // Load Sub(x) for 7th 8 bytes |
movq [edi+ebx+40], mm5 // Write Raw(x) for 6th 8 bytes |
paddb mm6, mm5 |
movq mm7, [edi+ebx+56] // Load Sub(x) for 8th 8 bytes |
movq [edi+ebx+48], mm6 // Write Raw(x) for 7th 8 bytes |
add ebx, 64 |
paddb mm7, mm6 |
cmp ebx, ecx |
movq [edi+ebx-8], mm7 // Write Raw(x) for 8th 8 bytes |
jb dsub8lp |
cmp ebx, MMXLength |
jnb dsub8lt8 |
dsub8lpA: |
movq mm0, [edi+ebx] |
add ebx, 8 |
paddb mm0, mm7 |
cmp ebx, MMXLength |
movq [edi+ebx-8], mm0 // use -8 to offset early add to ebx |
movq mm7, mm0 // Move calculated Raw(x) data to mm7 to |
// be the new Raw(x-bpp) for the next loop |
jb dsub8lpA |
dsub8lt8: |
} // end _asm block |
} |
break; |
default: // bpp greater than 8 bytes |
{ |
_asm { |
mov ebx, diff |
mov edi, row |
mov esi, edi // lp = row |
add edi, bpp // rp = row + bpp |
dsubAlp: |
movq mm0, [edi+ebx] |
movq mm1, [esi+ebx] |
add ebx, 8 |
paddb mm0, mm1 |
cmp ebx, MMXLength |
movq [edi+ebx-8], mm0 // mov does not affect flags; -8 to offset |
// add ebx |
jb dsubAlp |
} // end _asm block |
} |
break; |
} // end switch ( bpp ) |
_asm { |
mov ebx, MMXLength |
mov edi, row |
cmp ebx, FullLength |
jnb dsubend |
mov esi, edi // lp = row |
xor eax, eax |
add edi, bpp // rp = row + bpp |
dsublp2: |
mov al, [esi+ebx] |
add [edi+ebx], al |
inc ebx |
cmp ebx, FullLength |
jb dsublp2 |
dsubend: |
emms // End MMX instructions; prep for possible FP instrs. |
} // end _asm block |
} |
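// Editor's note: the sketch below is an illustrative reconstruction (not |
// part of libpng) of how the asm prologue above partitions a row for the |
// Sub filter: a byte loop up to 'diff' to reach 8-byte alignment of |
// row+bpp, an MMX loop up to 'MMXLength', and a trailing byte loop up to |
// 'FullLength'.  It assumes 32-bit pointers, as the asm does. |
#if 0 |
static void sub_row_partition(png_bytep row, int bpp, png_uint_32 rowbytes, |
   png_uint_32 *diff, png_uint_32 *mmxlen, png_uint_32 *full) |
{ |
   png_uint_32 rp = (png_uint_32)(row + bpp);      /* first byte to filter */ |
   *full   = rowbytes - bpp;                       /* # of bytes to filter */ |
   *diff   = ((rp + 0xf) & 0xfffffff8) - rp;       /* bytes until alignment */ |
   *mmxlen = *full - ((*full - *diff) & 0x7);      /* end of 8-byte region */ |
} |
#endif |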
// Optimized code for PNG Up filter decoder |
void /* PRIVATE */ |
png_read_filter_row_mmx_up(png_row_infop row_info, png_bytep row, |
png_bytep prev_row) |
{ |
png_uint_32 len; |
len = row_info->rowbytes; // # of bytes to filter |
_asm { |
mov edi, row |
// get # of bytes to alignment |
mov ecx, edi |
xor ebx, ebx |
add ecx, 0x7 |
xor eax, eax |
and ecx, 0xfffffff8 |
mov esi, prev_row |
sub ecx, edi |
jz dupgo |
// fix alignment |
duplp1: |
mov al, [edi+ebx] |
add al, [esi+ebx] |
inc ebx |
cmp ebx, ecx |
mov [edi + ebx-1], al // mov does not affect flags; -1 to offset inc ebx |
jb duplp1 |
dupgo: |
mov ecx, len |
mov edx, ecx |
sub edx, ebx // subtract alignment fix |
and edx, 0x0000003f // calc bytes over mult of 64 |
sub ecx, edx // drop over bytes from length |
// Unrolled loop - use all MMX registers and interleave to reduce |
// number of branch instructions (loops) and reduce partial stalls |
duploop: |
movq mm1, [esi+ebx] |
movq mm0, [edi+ebx] |
movq mm3, [esi+ebx+8] |
paddb mm0, mm1 |
movq mm2, [edi+ebx+8] |
movq [edi+ebx], mm0 |
paddb mm2, mm3 |
movq mm5, [esi+ebx+16] |
movq [edi+ebx+8], mm2 |
movq mm4, [edi+ebx+16] |
movq mm7, [esi+ebx+24] |
paddb mm4, mm5 |
movq mm6, [edi+ebx+24] |
movq [edi+ebx+16], mm4 |
paddb mm6, mm7 |
movq mm1, [esi+ebx+32] |
movq [edi+ebx+24], mm6 |
movq mm0, [edi+ebx+32] |
movq mm3, [esi+ebx+40] |
paddb mm0, mm1 |
movq mm2, [edi+ebx+40] |
movq [edi+ebx+32], mm0 |
paddb mm2, mm3 |
movq mm5, [esi+ebx+48] |
movq [edi+ebx+40], mm2 |
movq mm4, [edi+ebx+48] |
movq mm7, [esi+ebx+56] |
paddb mm4, mm5 |
movq mm6, [edi+ebx+56] |
movq [edi+ebx+48], mm4 |
add ebx, 64 |
paddb mm6, mm7 |
cmp ebx, ecx |
movq [edi+ebx-8], mm6 // (+56)movq does not affect flags; |
// -8 to offset add ebx |
jb duploop |
cmp edx, 0 // Test for bytes over mult of 64 |
jz dupend |
// 2 lines added by lcreeve@netins.net |
// (mail 11 Jul 98 in png-implement list) |
cmp edx, 8 //test for less than 8 bytes |
jb duplt8 |
add ecx, edx |
and edx, 0x00000007 // calc bytes over mult of 8 |
sub ecx, edx // drop over bytes from length |
jz duplt8 |
// Loop using MMX registers mm0 & mm1 to update 8 bytes simultaneously |
duplpA: |
movq mm1, [esi+ebx] |
movq mm0, [edi+ebx] |
add ebx, 8 |
paddb mm0, mm1 |
cmp ebx, ecx |
movq [edi+ebx-8], mm0 // movq does not affect flags; -8 to offset add ebx |
jb duplpA |
cmp edx, 0 // Test for bytes over mult of 8 |
jz dupend |
duplt8: |
xor eax, eax |
add ecx, edx // move over byte count into counter |
// Loop using x86 registers to update remaining bytes |
duplp2: |
mov al, [edi + ebx] |
add al, [esi + ebx] |
inc ebx |
cmp ebx, ecx |
mov [edi + ebx-1], al // mov does not affect flags; -1 to offset inc ebx |
jb duplp2 |
dupend: |
// Conversion of filtered row completed |
emms // End MMX instructions; prep for possible FP instrs. |
} // end _asm block |
} |
// Optimized png_read_filter_row routines |
void /* PRIVATE */ |
png_read_filter_row(png_structp png_ptr, png_row_infop row_info, png_bytep |
row, png_bytep prev_row, int filter) |
{ |
#ifdef PNG_DEBUG |
char filnm[10]; |
#endif |
if (mmx_supported == 2) { |
/* this should have happened in png_init_mmx_flags() already */ |
png_warning(png_ptr, "asm_flags may not have been initialized"); |
png_mmx_support(); |
} |
#ifdef PNG_DEBUG |
png_debug(1, "in png_read_filter_row\n"); |
switch (filter) |
{ |
case 0: sprintf(filnm, "none"); |
break; |
case 1: sprintf(filnm, "sub-%s", |
(png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_SUB)? "MMX" : "x86"); |
break; |
case 2: sprintf(filnm, "up-%s", |
(png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_UP)? "MMX" : "x86"); |
break; |
case 3: sprintf(filnm, "avg-%s", |
(png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_AVG)? "MMX" : "x86"); |
break; |
case 4: sprintf(filnm, "Paeth-%s", |
(png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_PAETH)? "MMX":"x86"); |
break; |
default: sprintf(filnm, "unknw"); |
break; |
} |
png_debug2(0,"row=%5d, %s, ", png_ptr->row_number, filnm); |
png_debug2(0, "pd=%2d, b=%d, ", (int)row_info->pixel_depth, |
(int)((row_info->pixel_depth + 7) >> 3)); |
png_debug1(0,"len=%8d, ", row_info->rowbytes); |
#endif /* PNG_DEBUG */ |
switch (filter) |
{ |
case PNG_FILTER_VALUE_NONE: |
break; |
case PNG_FILTER_VALUE_SUB: |
{ |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_SUB) && |
(row_info->pixel_depth >= png_ptr->mmx_bitdepth_threshold) && |
(row_info->rowbytes >= png_ptr->mmx_rowbytes_threshold)) |
{ |
png_read_filter_row_mmx_sub(row_info, row); |
} |
else |
{ |
png_uint_32 i; |
png_uint_32 istop = row_info->rowbytes; |
png_uint_32 bpp = (row_info->pixel_depth + 7) >> 3; |
png_bytep rp = row + bpp; |
png_bytep lp = row; |
for (i = bpp; i < istop; i++) |
{ |
*rp = (png_byte)(((int)(*rp) + (int)(*lp++)) & 0xff); |
rp++; |
} |
} |
break; |
} |
case PNG_FILTER_VALUE_UP: |
{ |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_UP) && |
(row_info->pixel_depth >= png_ptr->mmx_bitdepth_threshold) && |
(row_info->rowbytes >= png_ptr->mmx_rowbytes_threshold)) |
{ |
png_read_filter_row_mmx_up(row_info, row, prev_row); |
} |
else |
{ |
png_uint_32 i; |
png_uint_32 istop = row_info->rowbytes; |
png_bytep rp = row; |
png_bytep pp = prev_row; |
for (i = 0; i < istop; ++i) |
{ |
*rp = (png_byte)(((int)(*rp) + (int)(*pp++)) & 0xff); |
rp++; |
} |
} |
break; |
} |
case PNG_FILTER_VALUE_AVG: |
{ |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_AVG) && |
(row_info->pixel_depth >= png_ptr->mmx_bitdepth_threshold) && |
(row_info->rowbytes >= png_ptr->mmx_rowbytes_threshold)) |
{ |
png_read_filter_row_mmx_avg(row_info, row, prev_row); |
} |
else |
{ |
png_uint_32 i; |
png_bytep rp = row; |
png_bytep pp = prev_row; |
png_bytep lp = row; |
png_uint_32 bpp = (row_info->pixel_depth + 7) >> 3; |
png_uint_32 istop = row_info->rowbytes - bpp; |
for (i = 0; i < bpp; i++) |
{ |
*rp = (png_byte)(((int)(*rp) + |
((int)(*pp++) >> 1)) & 0xff); |
rp++; |
} |
for (i = 0; i < istop; i++) |
{ |
*rp = (png_byte)(((int)(*rp) + |
((int)(*pp++ + *lp++) >> 1)) & 0xff); |
rp++; |
} |
} |
break; |
} |
case PNG_FILTER_VALUE_PAETH: |
{ |
if ((png_ptr->asm_flags & PNG_ASM_FLAG_MMX_READ_FILTER_PAETH) && |
(row_info->pixel_depth >= png_ptr->mmx_bitdepth_threshold) && |
(row_info->rowbytes >= png_ptr->mmx_rowbytes_threshold)) |
{ |
png_read_filter_row_mmx_paeth(row_info, row, prev_row); |
} |
else |
{ |
png_uint_32 i; |
png_bytep rp = row; |
png_bytep pp = prev_row; |
png_bytep lp = row; |
png_bytep cp = prev_row; |
png_uint_32 bpp = (row_info->pixel_depth + 7) >> 3; |
png_uint_32 istop=row_info->rowbytes - bpp; |
for (i = 0; i < bpp; i++) |
{ |
*rp = (png_byte)(((int)(*rp) + (int)(*pp++)) & 0xff); |
rp++; |
} |
for (i = 0; i < istop; i++) // use leftover rp,pp |
{ |
int a, b, c, pa, pb, pc, p; |
a = *lp++; |
b = *pp++; |
c = *cp++; |
p = b - c; |
pc = a - c; |
#ifdef PNG_USE_ABS |
pa = abs(p); |
pb = abs(pc); |
pc = abs(p + pc); |
#else |
pa = p < 0 ? -p : p; |
pb = pc < 0 ? -pc : pc; |
pc = (p + pc) < 0 ? -(p + pc) : p + pc; |
#endif |
/* |
if (pa <= pb && pa <= pc) |
p = a; |
else if (pb <= pc) |
p = b; |
else |
p = c; |
*/ |
p = (pa <= pb && pa <=pc) ? a : (pb <= pc) ? b : c; |
*rp = (png_byte)(((int)(*rp) + p) & 0xff); |
rp++; |
} |
} |
break; |
} |
default: |
png_warning(png_ptr, "Ignoring bad row filter type"); |
*row=0; |
break; |
} |
} |
#endif /* PNG_ASSEMBLER_CODE_SUPPORTED && PNG_USE_PNGVCRD */ |
/shark/trunk/ports/png/deflate.c |
---|
0,0 → 1,1350 |
/* deflate.c -- compress data using the deflation algorithm |
* Copyright (C) 1995-2002 Jean-loup Gailly. |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* |
* ALGORITHM |
* |
* The "deflation" process depends on being able to identify portions |
* of the input text which are identical to earlier input (within a |
* sliding window trailing behind the input currently being processed). |
* |
* The most straightforward technique turns out to be the fastest for |
* most input files: try all possible matches and select the longest. |
* The key feature of this algorithm is that insertions into the string |
* dictionary are very simple and thus fast, and deletions are avoided |
* completely. Insertions are performed at each input character, whereas |
* string matches are performed only when the previous match ends. So it |
* is preferable to spend more time in matches to allow very fast string |
* insertions and avoid deletions. The matching algorithm for small |
* strings is inspired from that of Rabin & Karp. A brute force approach |
* is used to find longer strings when a small match has been found. |
* A similar algorithm is used in comic (by Jan-Mark Wams) and freeze |
* (by Leonid Broukhis). |
* A previous version of this file used a more sophisticated algorithm |
* (by Fiala and Greene) which is guaranteed to run in linear amortized |
* time, but has a larger average cost, uses more memory and is patented. |
* However the F&G algorithm may be faster for some highly redundant |
* files if the parameter max_chain_length (described below) is too large. |
* |
* ACKNOWLEDGEMENTS |
* |
* The idea of lazy evaluation of matches is due to Jan-Mark Wams, and |
* I found it in 'freeze' written by Leonid Broukhis. |
* Thanks to many people for bug reports and testing. |
* |
* REFERENCES |
* |
* Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". |
* Available in ftp://ds.internic.net/rfc/rfc1951.txt |
* |
* A description of the Rabin and Karp algorithm is given in the book |
* "Algorithms" by R. Sedgewick, Addison-Wesley, p252. |
* |
* Fiala, E.R., and Greene, D.H. |
* Data Compression with Finite Windows, Comm. ACM, 32, 4 (1989), 490-505 |
* |
*/ |
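/* Editor's note: the following is an illustrative sketch only (not part of |
 * zlib) of the matching idea described above, stripped of the hash table, |
 * lazy evaluation and window management that deflate actually uses: at each |
 * position look for the longest earlier copy within the window and emit |
 * either a (distance,length) pair or a literal byte. |
 */ |
#if 0 |
static void toy_lz77(const unsigned char *in, unsigned len) |
{ |
    unsigned i = 0; |
    while (i < len) { |
        unsigned best_len = 0, best_dist = 0, j; |
        unsigned start = i > 32768 ? i - 32768 : 0;  /* sliding window */ |
        for (j = start; j < i; j++) {                /* brute-force search */ |
            unsigned k = 0; |
            while (i + k < len && k < 258 && in[j + k] == in[i + k]) |
                k++; |
            if (k > best_len) { best_len = k; best_dist = i - j; } |
        } |
        if (best_len >= 3) {         /* MIN_MATCH: worth coding as a match */ |
            /* emit match (best_dist, best_len) */ |
            i += best_len; |
        } else { |
            /* emit literal in[i] */ |
            i++; |
        } |
    } |
} |
#endif |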
/* @(#) $Id: deflate.c,v 1.1 2003-03-20 13:08:10 giacomo Exp $ */ |
#include "deflate.h" |
const char deflate_copyright[] = |
" deflate 1.1.4 Copyright 1995-2002 Jean-loup Gailly "; |
/* |
If you use the zlib library in a product, an acknowledgment is welcome |
in the documentation of your product. If for some reason you cannot |
include such an acknowledgment, I would appreciate that you keep this |
copyright string in the executable of your product. |
*/ |
/* =========================================================================== |
* Function prototypes. |
*/ |
typedef enum { |
need_more, /* block not completed, need more input or more output */ |
block_done, /* block flush performed */ |
finish_started, /* finish started, need only more output at next deflate */ |
finish_done /* finish done, accept no more input or output */ |
} block_state; |
typedef block_state (*compress_func) OF((deflate_state *s, int flush)); |
/* Compression function. Returns the block state after the call. */ |
local void fill_window OF((deflate_state *s)); |
local block_state deflate_stored OF((deflate_state *s, int flush)); |
local block_state deflate_fast OF((deflate_state *s, int flush)); |
local block_state deflate_slow OF((deflate_state *s, int flush)); |
local void lm_init OF((deflate_state *s)); |
local void putShortMSB OF((deflate_state *s, uInt b)); |
local void flush_pending OF((z_streamp strm)); |
local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); |
#ifdef ASMV |
void match_init OF((void)); /* asm code initialization */ |
uInt longest_match OF((deflate_state *s, IPos cur_match)); |
#else |
local uInt longest_match OF((deflate_state *s, IPos cur_match)); |
#endif |
#ifdef DEBUG |
local void check_match OF((deflate_state *s, IPos start, IPos match, |
int length)); |
#endif |
/* =========================================================================== |
* Local data |
*/ |
#define NIL 0 |
/* Tail of hash chains */ |
#ifndef TOO_FAR |
# define TOO_FAR 4096 |
#endif |
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ |
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) |
/* Minimum amount of lookahead, except at the end of the input file. |
* See deflate.c for comments about the MIN_MATCH+1. |
*/ |
/* Values for max_lazy_match, good_match and max_chain_length, depending on |
* the desired pack level (0..9). The values given below have been tuned to |
* exclude worst case performance for pathological files. Better values may be |
* found for specific files. |
*/ |
typedef struct config_s { |
ush good_length; /* reduce lazy search above this match length */ |
ush max_lazy; /* do not perform lazy search above this match length */ |
ush nice_length; /* quit search above this match length */ |
ush max_chain; |
compress_func func; |
} config; |
local const config configuration_table[10] = { |
/* good lazy nice chain */ |
/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ |
/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */ |
/* 2 */ {4, 5, 16, 8, deflate_fast}, |
/* 3 */ {4, 6, 32, 32, deflate_fast}, |
/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ |
/* 5 */ {8, 16, 32, 32, deflate_slow}, |
/* 6 */ {8, 16, 128, 128, deflate_slow}, |
/* 7 */ {8, 32, 128, 256, deflate_slow}, |
/* 8 */ {32, 128, 258, 1024, deflate_slow}, |
/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */ |
/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 |
* For deflate_fast() (levels <= 3) good is ignored and lazy has a different |
* meaning. |
*/ |
#define EQUAL 0 |
/* result of memcmp for equal strings */ |
struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ |
/* =========================================================================== |
* Update a hash value with the given input byte |
* IN assertion: all calls to UPDATE_HASH are made with consecutive |
* input characters, so that a running hash key can be computed from the |
* previous key instead of complete recalculation each time. |
*/ |
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) |
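/* Editor's worked example (not part of zlib): with the default memLevel 8, |
 * hash_bits = 15, hash_shift = (15+3-1)/3 = 5 and hash_mask = 0x7fff, so |
 * h = ((h << 5) ^ c) & 0x7fff.  After three consecutive updates with bytes |
 * c0,c1,c2 the key is ((c0<<10) ^ (c1<<5) ^ c2) & 0x7fff; a byte inserted |
 * three updates earlier has been shifted left by 15 bits and is removed by |
 * the mask, so the key always depends on the last MIN_MATCH bytes only. |
 */ |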
/* =========================================================================== |
* Insert string str in the dictionary and set match_head to the previous head |
* of the hash chain (the most recent string with same hash key). Return |
* the previous length of the hash chain. |
* If this file is compiled with -DFASTEST, the compression level is forced |
* to 1, and no hash chains are maintained. |
* IN assertion: all calls to INSERT_STRING are made with consecutive |
* input characters and the first MIN_MATCH bytes of str are valid |
* (except for the last MIN_MATCH-1 bytes of the input file). |
*/ |
#ifdef FASTEST |
#define INSERT_STRING(s, str, match_head) \ |
(UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ |
match_head = s->head[s->ins_h], \ |
s->head[s->ins_h] = (Pos)(str)) |
#else |
#define INSERT_STRING(s, str, match_head) \ |
(UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ |
s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \ |
s->head[s->ins_h] = (Pos)(str)) |
#endif |
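/* Editor's note: an illustrative sketch (not part of zlib) of the hash-chain |
 * structure built by INSERT_STRING.  head[h] holds the most recent position |
 * whose first MIN_MATCH bytes hash to h, and prev[pos & w_mask] links each |
 * position to the previous one with the same hash.  longest_match() walks |
 * this chain, additionally bounded by 'limit' and max_chain_length: |
 */ |
#if 0 |
static void walk_hash_chain(deflate_state *s, uInt h) |
{ |
    IPos pos = s->head[h]; |
    unsigned budget = s->max_chain_length;      /* bound the walk */ |
    while (pos != NIL && budget-- != 0) { |
        /* s->window + pos is a candidate match for the current string */ |
        pos = s->prev[pos & s->w_mask]; |
    } |
} |
#endif |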
/* =========================================================================== |
* Initialize the hash table (avoiding 64K overflow for 16 bit systems). |
* prev[] will be initialized on the fly. |
*/ |
#define CLEAR_HASH(s) \ |
s->head[s->hash_size-1] = NIL; \ |
zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); |
/* ========================================================================= */ |
int ZEXPORT deflateInit_(strm, level, version, stream_size) |
z_streamp strm; |
int level; |
const char *version; |
int stream_size; |
{ |
return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, |
Z_DEFAULT_STRATEGY, version, stream_size); |
/* To do: ignore strm->next_in if we use it as window */ |
} |
/* ========================================================================= */ |
int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, |
version, stream_size) |
z_streamp strm; |
int level; |
int method; |
int windowBits; |
int memLevel; |
int strategy; |
const char *version; |
int stream_size; |
{ |
deflate_state *s; |
int noheader = 0; |
static const char* my_version = ZLIB_VERSION; |
ushf *overlay; |
/* We overlay pending_buf and d_buf+l_buf. This works since the average |
* output size for (length,distance) codes is <= 24 bits. |
*/ |
if (version == Z_NULL || version[0] != my_version[0] || |
stream_size != sizeof(z_stream)) { |
return Z_VERSION_ERROR; |
} |
if (strm == Z_NULL) return Z_STREAM_ERROR; |
strm->msg = Z_NULL; |
if (strm->zalloc == Z_NULL) { |
strm->zalloc = zcalloc; |
strm->opaque = (voidpf)0; |
} |
if (strm->zfree == Z_NULL) strm->zfree = zcfree; |
if (level == Z_DEFAULT_COMPRESSION) level = 6; |
#ifdef FASTEST |
level = 1; |
#endif |
if (windowBits < 0) { /* undocumented feature: suppress zlib header */ |
noheader = 1; |
windowBits = -windowBits; |
} |
if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || |
windowBits < 9 || windowBits > 15 || level < 0 || level > 9 || |
strategy < 0 || strategy > Z_HUFFMAN_ONLY) { |
return Z_STREAM_ERROR; |
} |
s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); |
if (s == Z_NULL) return Z_MEM_ERROR; |
strm->state = (struct internal_state FAR *)s; |
s->strm = strm; |
s->noheader = noheader; |
s->w_bits = windowBits; |
s->w_size = 1 << s->w_bits; |
s->w_mask = s->w_size - 1; |
s->hash_bits = memLevel + 7; |
s->hash_size = 1 << s->hash_bits; |
s->hash_mask = s->hash_size - 1; |
s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); |
s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); |
s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); |
s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); |
s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ |
overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); |
s->pending_buf = (uchf *) overlay; |
s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); |
if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || |
s->pending_buf == Z_NULL) { |
strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); |
deflateEnd (strm); |
return Z_MEM_ERROR; |
} |
s->d_buf = overlay + s->lit_bufsize/sizeof(ush); |
s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; |
s->level = level; |
s->strategy = strategy; |
s->method = (Byte)method; |
return deflateReset(strm); |
} |
/* ========================================================================= */ |
int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) |
z_streamp strm; |
const Bytef *dictionary; |
uInt dictLength; |
{ |
deflate_state *s; |
uInt length = dictLength; |
uInt n; |
IPos hash_head = 0; |
if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL || |
strm->state->status != INIT_STATE) return Z_STREAM_ERROR; |
s = strm->state; |
strm->adler = adler32(strm->adler, dictionary, dictLength); |
if (length < MIN_MATCH) return Z_OK; |
if (length > MAX_DIST(s)) { |
length = MAX_DIST(s); |
#ifndef USE_DICT_HEAD |
dictionary += dictLength - length; /* use the tail of the dictionary */ |
#endif |
} |
zmemcpy(s->window, dictionary, length); |
s->strstart = length; |
s->block_start = (long)length; |
/* Insert all strings in the hash table (except for the last two bytes). |
* s->lookahead stays null, so s->ins_h will be recomputed at the next |
* call of fill_window. |
*/ |
s->ins_h = s->window[0]; |
UPDATE_HASH(s, s->ins_h, s->window[1]); |
for (n = 0; n <= length - MIN_MATCH; n++) { |
INSERT_STRING(s, n, hash_head); |
} |
if (hash_head) hash_head = 0; /* to make compiler happy */ |
return Z_OK; |
} |
/* ========================================================================= */ |
int ZEXPORT deflateReset (strm) |
z_streamp strm; |
{ |
deflate_state *s; |
if (strm == Z_NULL || strm->state == Z_NULL || |
strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR; |
strm->total_in = strm->total_out = 0; |
strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ |
strm->data_type = Z_UNKNOWN; |
s = (deflate_state *)strm->state; |
s->pending = 0; |
s->pending_out = s->pending_buf; |
if (s->noheader < 0) { |
s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */ |
} |
s->status = s->noheader ? BUSY_STATE : INIT_STATE; |
strm->adler = 1; |
s->last_flush = Z_NO_FLUSH; |
_tr_init(s); |
lm_init(s); |
return Z_OK; |
} |
/* ========================================================================= */ |
int ZEXPORT deflateParams(strm, level, strategy) |
z_streamp strm; |
int level; |
int strategy; |
{ |
deflate_state *s; |
compress_func func; |
int err = Z_OK; |
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; |
s = strm->state; |
if (level == Z_DEFAULT_COMPRESSION) { |
level = 6; |
} |
if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { |
return Z_STREAM_ERROR; |
} |
func = configuration_table[s->level].func; |
if (func != configuration_table[level].func && strm->total_in != 0) { |
/* Flush the last buffer: */ |
err = deflate(strm, Z_PARTIAL_FLUSH); |
} |
if (s->level != level) { |
s->level = level; |
s->max_lazy_match = configuration_table[level].max_lazy; |
s->good_match = configuration_table[level].good_length; |
s->nice_match = configuration_table[level].nice_length; |
s->max_chain_length = configuration_table[level].max_chain; |
} |
s->strategy = strategy; |
return err; |
} |
/* ========================================================================= |
* Put a short in the pending buffer. The 16-bit value is put in MSB order. |
* IN assertion: the stream state is correct and there is enough room in |
* pending_buf. |
*/ |
local void putShortMSB (s, b) |
deflate_state *s; |
uInt b; |
{ |
put_byte(s, (Byte)(b >> 8)); |
put_byte(s, (Byte)(b & 0xff)); |
} |
/* ========================================================================= |
* Flush as much pending output as possible. All deflate() output goes |
* through this function so some applications may wish to modify it |
* to avoid allocating a large strm->next_out buffer and copying into it. |
* (See also read_buf()). |
*/ |
local void flush_pending(strm) |
z_streamp strm; |
{ |
unsigned len = strm->state->pending; |
if (len > strm->avail_out) len = strm->avail_out; |
if (len == 0) return; |
zmemcpy(strm->next_out, strm->state->pending_out, len); |
strm->next_out += len; |
strm->state->pending_out += len; |
strm->total_out += len; |
strm->avail_out -= len; |
strm->state->pending -= len; |
if (strm->state->pending == 0) { |
strm->state->pending_out = strm->state->pending_buf; |
} |
} |
/* ========================================================================= */ |
int ZEXPORT deflate (strm, flush) |
z_streamp strm; |
int flush; |
{ |
int old_flush; /* value of flush param for previous deflate call */ |
deflate_state *s; |
if (strm == Z_NULL || strm->state == Z_NULL || |
flush > Z_FINISH || flush < 0) { |
return Z_STREAM_ERROR; |
} |
s = strm->state; |
if (strm->next_out == Z_NULL || |
(strm->next_in == Z_NULL && strm->avail_in != 0) || |
(s->status == FINISH_STATE && flush != Z_FINISH)) { |
ERR_RETURN(strm, Z_STREAM_ERROR); |
} |
if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); |
s->strm = strm; /* just in case */ |
old_flush = s->last_flush; |
s->last_flush = flush; |
/* Write the zlib header */ |
if (s->status == INIT_STATE) { |
uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; |
uInt level_flags = (s->level-1) >> 1; |
if (level_flags > 3) level_flags = 3; |
header |= (level_flags << 6); |
if (s->strstart != 0) header |= PRESET_DICT; |
header += 31 - (header % 31); |
s->status = BUSY_STATE; |
putShortMSB(s, header); |
/* Save the adler32 of the preset dictionary: */ |
if (s->strstart != 0) { |
putShortMSB(s, (uInt)(strm->adler >> 16)); |
putShortMSB(s, (uInt)(strm->adler & 0xffff)); |
} |
strm->adler = 1L; |
} |
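/* Editor's worked example (not part of zlib): with the defaults |
 * windowBits = 15 and level = 6 and no preset dictionary, the header |
 * computed above is (8 + (7<<4))<<8 = 0x7800, then |= 2<<6 = 0x7880, |
 * then += 31 - (0x7880 % 31) = 0x789C, which is divisible by 31 as |
 * RFC 1950 requires; putShortMSB() thus emits the familiar bytes 78 9C. |
 */ |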
/* Flush as much pending output as possible */ |
if (s->pending != 0) { |
flush_pending(strm); |
if (strm->avail_out == 0) { |
/* Since avail_out is 0, deflate will be called again with |
* more output space, but possibly with both pending and |
* avail_in equal to zero. There won't be anything to do, |
* but this is not an error situation so make sure we |
* return OK instead of BUF_ERROR at next call of deflate: |
*/ |
s->last_flush = -1; |
return Z_OK; |
} |
/* Make sure there is something to do and avoid duplicate consecutive |
* flushes. For repeated and useless calls with Z_FINISH, we keep |
* returning Z_STREAM_END instead of Z_BUF_ERROR. |
*/ |
} else if (strm->avail_in == 0 && flush <= old_flush && |
flush != Z_FINISH) { |
ERR_RETURN(strm, Z_BUF_ERROR); |
} |
/* User must not provide more input after the first FINISH: */ |
if (s->status == FINISH_STATE && strm->avail_in != 0) { |
ERR_RETURN(strm, Z_BUF_ERROR); |
} |
/* Start a new block or continue the current one. |
*/ |
if (strm->avail_in != 0 || s->lookahead != 0 || |
(flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { |
block_state bstate; |
bstate = (*(configuration_table[s->level].func))(s, flush); |
if (bstate == finish_started || bstate == finish_done) { |
s->status = FINISH_STATE; |
} |
if (bstate == need_more || bstate == finish_started) { |
if (strm->avail_out == 0) { |
s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ |
} |
return Z_OK; |
/* If flush != Z_NO_FLUSH && avail_out == 0, the next call |
* of deflate should use the same flush parameter to make sure |
* that the flush is complete. So we don't have to output an |
* empty block here, this will be done at next call. This also |
* ensures that for a very small output buffer, we emit at most |
* one empty block. |
*/ |
} |
if (bstate == block_done) { |
if (flush == Z_PARTIAL_FLUSH) { |
_tr_align(s); |
} else { /* FULL_FLUSH or SYNC_FLUSH */ |
_tr_stored_block(s, (char*)0, 0L, 0); |
/* For a full flush, this empty block will be recognized |
* as a special marker by inflate_sync(). |
*/ |
if (flush == Z_FULL_FLUSH) { |
CLEAR_HASH(s); /* forget history */ |
} |
} |
flush_pending(strm); |
if (strm->avail_out == 0) { |
s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ |
return Z_OK; |
} |
} |
} |
Assert(strm->avail_out > 0, "bug2"); |
if (flush != Z_FINISH) return Z_OK; |
if (s->noheader) return Z_STREAM_END; |
/* Write the zlib trailer (adler32) */ |
putShortMSB(s, (uInt)(strm->adler >> 16)); |
putShortMSB(s, (uInt)(strm->adler & 0xffff)); |
flush_pending(strm); |
/* If avail_out is zero, the application will call deflate again |
* to flush the rest. |
*/ |
s->noheader = -1; /* write the trailer only once! */ |
return s->pending != 0 ? Z_OK : Z_STREAM_END; |
} |
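/* Editor's note: an illustrative single-shot usage sketch (not part of this |
 * file; it mirrors what zlib's compress2() does).  All names used below are |
 * standard zlib API; the helper function itself is hypothetical. |
 */ |
#if 0 |
static int compress_whole_buffer(Bytef *dest, uLongf *destLen, |
    const Bytef *source, uLong sourceLen) |
{ |
    z_stream stream; |
    int err; |
    stream.zalloc = (alloc_func)0; |
    stream.zfree = (free_func)0; |
    stream.opaque = (voidpf)0; |
    err = deflateInit(&stream, Z_DEFAULT_COMPRESSION); |
    if (err != Z_OK) return err; |
    stream.next_in = (Bytef*)source; |
    stream.avail_in = (uInt)sourceLen; |
    stream.next_out = dest; |
    stream.avail_out = (uInt)*destLen; |
    err = deflate(&stream, Z_FINISH);   /* all input given, finish in one call */ |
    *destLen = stream.total_out; |
    deflateEnd(&stream); |
    return err == Z_STREAM_END ? Z_OK : (err == Z_OK ? Z_BUF_ERROR : err); |
} |
#endif |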
/* ========================================================================= */ |
int ZEXPORT deflateEnd (strm) |
z_streamp strm; |
{ |
int status; |
if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; |
status = strm->state->status; |
if (status != INIT_STATE && status != BUSY_STATE && |
status != FINISH_STATE) { |
return Z_STREAM_ERROR; |
} |
/* Deallocate in reverse order of allocations: */ |
TRY_FREE(strm, strm->state->pending_buf); |
TRY_FREE(strm, strm->state->head); |
TRY_FREE(strm, strm->state->prev); |
TRY_FREE(strm, strm->state->window); |
ZFREE(strm, strm->state); |
strm->state = Z_NULL; |
return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; |
} |
/* ========================================================================= |
* Copy the source state to the destination state. |
* To simplify the source, this is not supported for 16-bit MSDOS (which |
* doesn't have enough memory anyway to duplicate compression states). |
*/ |
int ZEXPORT deflateCopy (dest, source) |
z_streamp dest; |
z_streamp source; |
{ |
#ifdef MAXSEG_64K |
return Z_STREAM_ERROR; |
#else |
deflate_state *ds; |
deflate_state *ss; |
ushf *overlay; |
if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) { |
return Z_STREAM_ERROR; |
} |
ss = source->state; |
*dest = *source; |
ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); |
if (ds == Z_NULL) return Z_MEM_ERROR; |
dest->state = (struct internal_state FAR *) ds; |
*ds = *ss; |
ds->strm = dest; |
ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); |
ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); |
ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); |
overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); |
ds->pending_buf = (uchf *) overlay; |
if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || |
ds->pending_buf == Z_NULL) { |
deflateEnd (dest); |
return Z_MEM_ERROR; |
} |
/* the following zmemcpy calls do not work for 16-bit MSDOS */ |
zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); |
zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); |
zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); |
zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); |
ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); |
ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); |
ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; |
ds->l_desc.dyn_tree = ds->dyn_ltree; |
ds->d_desc.dyn_tree = ds->dyn_dtree; |
ds->bl_desc.dyn_tree = ds->bl_tree; |
return Z_OK; |
#endif |
} |
/* =========================================================================== |
* Read a new buffer from the current input stream, update the adler32 |
* and total number of bytes read. All deflate() input goes through |
* this function so some applications may wish to modify it to avoid |
* allocating a large strm->next_in buffer and copying from it. |
* (See also flush_pending()). |
*/ |
local int read_buf(strm, buf, size) |
z_streamp strm; |
Bytef *buf; |
unsigned size; |
{ |
unsigned len = strm->avail_in; |
if (len > size) len = size; |
if (len == 0) return 0; |
strm->avail_in -= len; |
if (!strm->state->noheader) { |
strm->adler = adler32(strm->adler, strm->next_in, len); |
} |
zmemcpy(buf, strm->next_in, len); |
strm->next_in += len; |
strm->total_in += len; |
return (int)len; |
} |
/* =========================================================================== |
* Initialize the "longest match" routines for a new zlib stream |
*/ |
local void lm_init (s) |
deflate_state *s; |
{ |
s->window_size = (ulg)2L*s->w_size; |
CLEAR_HASH(s); |
/* Set the default configuration parameters: |
*/ |
s->max_lazy_match = configuration_table[s->level].max_lazy; |
s->good_match = configuration_table[s->level].good_length; |
s->nice_match = configuration_table[s->level].nice_length; |
s->max_chain_length = configuration_table[s->level].max_chain; |
s->strstart = 0; |
s->block_start = 0L; |
s->lookahead = 0; |
s->match_length = s->prev_length = MIN_MATCH-1; |
s->match_available = 0; |
s->ins_h = 0; |
#ifdef ASMV |
match_init(); /* initialize the asm code */ |
#endif |
} |
/* =========================================================================== |
* Set match_start to the longest match starting at the given string and |
* return its length. Matches shorter or equal to prev_length are discarded, |
* in which case the result is equal to prev_length and match_start is |
* garbage. |
* IN assertions: cur_match is the head of the hash chain for the current |
* string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 |
* OUT assertion: the match length is not greater than s->lookahead. |
*/ |
#ifndef ASMV |
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or |
* match.S. The code will be functionally equivalent. |
*/ |
#ifndef FASTEST |
local uInt longest_match(s, cur_match) |
deflate_state *s; |
IPos cur_match; /* current match */ |
{ |
unsigned chain_length = s->max_chain_length;/* max hash chain length */ |
register Bytef *scan = s->window + s->strstart; /* current string */ |
register Bytef *match; /* matched string */ |
register int len; /* length of current match */ |
int best_len = s->prev_length; /* best match length so far */ |
int nice_match = s->nice_match; /* stop if match long enough */ |
IPos limit = s->strstart > (IPos)MAX_DIST(s) ? |
s->strstart - (IPos)MAX_DIST(s) : NIL; |
/* Stop when cur_match becomes <= limit. To simplify the code, |
* we prevent matches with the string of window index 0. |
*/ |
Posf *prev = s->prev; |
uInt wmask = s->w_mask; |
#ifdef UNALIGNED_OK |
/* Compare two bytes at a time. Note: this is not always beneficial. |
* Try with and without -DUNALIGNED_OK to check. |
*/ |
register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; |
register ush scan_start = *(ushf*)scan; |
register ush scan_end = *(ushf*)(scan+best_len-1); |
#else |
register Bytef *strend = s->window + s->strstart + MAX_MATCH; |
register Byte scan_end1 = scan[best_len-1]; |
register Byte scan_end = scan[best_len]; |
#endif |
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. |
* It is easy to get rid of this optimization if necessary. |
*/ |
Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); |
/* Do not waste too much time if we already have a good match: */ |
if (s->prev_length >= s->good_match) { |
chain_length >>= 2; |
} |
/* Do not look for matches beyond the end of the input. This is necessary |
* to make deflate deterministic. |
*/ |
if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; |
Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); |
do { |
Assert(cur_match < s->strstart, "no future"); |
match = s->window + cur_match; |
/* Skip to next match if the match length cannot increase |
* or if the match length is less than 2: |
*/ |
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) |
/* This code assumes sizeof(unsigned short) == 2. Do not use |
* UNALIGNED_OK if your compiler uses a different size. |
*/ |
if (*(ushf*)(match+best_len-1) != scan_end || |
*(ushf*)match != scan_start) continue; |
/* It is not necessary to compare scan[2] and match[2] since they are |
* always equal when the other bytes match, given that the hash keys |
* are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at |
* strstart+3, +5, ... up to strstart+257. We check for insufficient |
* lookahead only every 4th comparison; the 128th check will be made |
* at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is |
* necessary to put more guard bytes at the end of the window, or |
* to check more often for insufficient lookahead. |
*/ |
Assert(scan[2] == match[2], "scan[2]?"); |
scan++, match++; |
do { |
} while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && |
*(ushf*)(scan+=2) == *(ushf*)(match+=2) && |
*(ushf*)(scan+=2) == *(ushf*)(match+=2) && |
*(ushf*)(scan+=2) == *(ushf*)(match+=2) && |
scan < strend); |
/* The funny "do {}" generates better code on most compilers */ |
/* Here, scan <= window+strstart+257 */ |
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); |
if (*scan == *match) scan++; |
len = (MAX_MATCH - 1) - (int)(strend-scan); |
scan = strend - (MAX_MATCH-1); |
#else /* UNALIGNED_OK */ |
if (match[best_len] != scan_end || |
match[best_len-1] != scan_end1 || |
*match != *scan || |
*++match != scan[1]) continue; |
/* The check at best_len-1 can be removed because it will be made |
* again later. (This heuristic is not always a win.) |
* It is not necessary to compare scan[2] and match[2] since they |
* are always equal when the other bytes match, given that |
* the hash keys are equal and that HASH_BITS >= 8. |
*/ |
scan += 2, match++; |
Assert(*scan == *match, "match[2]?"); |
/* We check for insufficient lookahead only every 8th comparison; |
* the 256th check will be made at strstart+258. |
*/ |
do { |
} while (*++scan == *++match && *++scan == *++match && |
*++scan == *++match && *++scan == *++match && |
*++scan == *++match && *++scan == *++match && |
*++scan == *++match && *++scan == *++match && |
scan < strend); |
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); |
len = MAX_MATCH - (int)(strend - scan); |
scan = strend - MAX_MATCH; |
#endif /* UNALIGNED_OK */ |
if (len > best_len) { |
s->match_start = cur_match; |
best_len = len; |
if (len >= nice_match) break; |
#ifdef UNALIGNED_OK |
scan_end = *(ushf*)(scan+best_len-1); |
#else |
scan_end1 = scan[best_len-1]; |
scan_end = scan[best_len]; |
#endif |
} |
} while ((cur_match = prev[cur_match & wmask]) > limit |
&& --chain_length != 0); |
if ((uInt)best_len <= s->lookahead) return (uInt)best_len; |
return s->lookahead; |
} |
#else /* FASTEST */ |
/* --------------------------------------------------------------------------- |
* Optimized version for level == 1 only |
*/ |
local uInt longest_match(s, cur_match) |
deflate_state *s; |
IPos cur_match; /* current match */ |
{ |
register Bytef *scan = s->window + s->strstart; /* current string */ |
register Bytef *match; /* matched string */ |
register int len; /* length of current match */ |
register Bytef *strend = s->window + s->strstart + MAX_MATCH; |
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. |
* It is easy to get rid of this optimization if necessary. |
*/ |
Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); |
Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); |
Assert(cur_match < s->strstart, "no future"); |
match = s->window + cur_match; |
/* Return failure if the match length is less than 2: |
*/ |
if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; |
/* The check at best_len-1 can be removed because it will be made |
* again later. (This heuristic is not always a win.) |
* It is not necessary to compare scan[2] and match[2] since they |
* are always equal when the other bytes match, given that |
* the hash keys are equal and that HASH_BITS >= 8. |
*/ |
scan += 2, match += 2; |
Assert(*scan == *match, "match[2]?"); |
/* We check for insufficient lookahead only every 8th comparison; |
* the 256th check will be made at strstart+258. |
*/ |
do { |
} while (*++scan == *++match && *++scan == *++match && |
*++scan == *++match && *++scan == *++match && |
*++scan == *++match && *++scan == *++match && |
*++scan == *++match && *++scan == *++match && |
scan < strend); |
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); |
len = MAX_MATCH - (int)(strend - scan); |
if (len < MIN_MATCH) return MIN_MATCH - 1; |
s->match_start = cur_match; |
return len <= s->lookahead ? len : s->lookahead; |
} |
#endif /* FASTEST */ |
#endif /* ASMV */ |
#ifdef DEBUG |
/* =========================================================================== |
* Check that the match at match_start is indeed a match. |
*/ |
local void check_match(s, start, match, length) |
deflate_state *s; |
IPos start, match; |
int length; |
{ |
/* check that the match is indeed a match */ |
if (zmemcmp(s->window + match, |
s->window + start, length) != EQUAL) { |
cprintf(" start %u, match %u, length %d\n", |
start, match, length); |
do { |
cprintf("%c%c", s->window[match++], s->window[start++]); |
} while (--length != 0); |
z_error("invalid match"); |
} |
if (z_verbose > 1) { |
cprintf("\\[%d,%d]", start-match, length); |
do { putc(s->window[start++], stderr); } while (--length != 0); |
} |
} |
#else |
# define check_match(s, start, match, length) |
#endif |
/* =========================================================================== |
* Fill the window when the lookahead becomes insufficient. |
* Updates strstart and lookahead. |
* |
* IN assertion: lookahead < MIN_LOOKAHEAD |
* OUT assertions: strstart <= window_size-MIN_LOOKAHEAD |
* At least one byte has been read, or avail_in == 0; reads are |
* performed for at least two bytes (required for the zip translate_eol |
* option -- not supported here). |
*/ |
local void fill_window(s) |
deflate_state *s; |
{ |
register unsigned n, m; |
register Posf *p; |
unsigned more; /* Amount of free space at the end of the window. */ |
uInt wsize = s->w_size; |
do { |
more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); |
/* Deal with !@#$% 64K limit: */ |
if (more == 0 && s->strstart == 0 && s->lookahead == 0) { |
more = wsize; |
} else if (more == (unsigned)(-1)) { |
/* Very unlikely, but possible on 16 bit machine if strstart == 0 |
* and lookahead == 1 (input done one byte at time) |
*/ |
more--; |
/* If the window is almost full and there is insufficient lookahead, |
* move the upper half to the lower one to make room in the upper half. |
*/ |
} else if (s->strstart >= wsize+MAX_DIST(s)) { |
zmemcpy(s->window, s->window+wsize, (unsigned)wsize); |
s->match_start -= wsize; |
s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ |
s->block_start -= (long) wsize; |
/* Slide the hash table (could be avoided with 32 bit values |
at the expense of memory usage). We slide even when level == 0 |
to keep the hash table consistent if we switch back to level > 0 |
later. (Using level 0 permanently is not an optimal usage of |
zlib, so we don't care about this pathological case.) |
*/ |
n = s->hash_size; |
p = &s->head[n]; |
do { |
m = *--p; |
*p = (Pos)(m >= wsize ? m-wsize : NIL); |
} while (--n); |
n = wsize; |
#ifndef FASTEST |
p = &s->prev[n]; |
do { |
m = *--p; |
*p = (Pos)(m >= wsize ? m-wsize : NIL); |
/* If n is not on any hash chain, prev[n] is garbage but |
* its value will never be used. |
*/ |
} while (--n); |
#endif |
more += wsize; |
} |
if (s->strm->avail_in == 0) return; |
/* If there was no sliding: |
* strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && |
* more == window_size - lookahead - strstart |
* => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) |
* => more >= window_size - 2*WSIZE + 2 |
* In the BIG_MEM or MMAP case (not yet supported), |
* window_size == input_size + MIN_LOOKAHEAD && |
* strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. |
* Otherwise, window_size == 2*WSIZE so more >= 2. |
* If there was sliding, more >= WSIZE. So in all cases, more >= 2. |
*/ |
Assert(more >= 2, "more < 2"); |
n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); |
s->lookahead += n; |
/* Initialize the hash value now that we have some input: */ |
if (s->lookahead >= MIN_MATCH) { |
s->ins_h = s->window[s->strstart]; |
UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); |
#if MIN_MATCH != 3 |
Call UPDATE_HASH() MIN_MATCH-3 more times |
#endif |
} |
/* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, |
* but this is not important since only literal bytes will be emitted. |
*/ |
} while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); |
} |
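/* Editor's worked example (not part of zlib): with windowBits = 15, |
 * wsize = 32768, window_size = 65536 and MAX_DIST(s) = 32768 - 262 = 32506. |
 * Once strstart reaches wsize + MAX_DIST(s) = 65274, fill_window() copies |
 * the upper 32K of the window down, subtracts 32768 from strstart, |
 * match_start and block_start, and rewrites every head[]/prev[] entry m |
 * as m - 32768 (or NIL if the position slid out of the window). |
 */ |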
/* =========================================================================== |
* Flush the current block, with given end-of-file flag. |
* IN assertion: strstart is set to the end of the current match. |
*/ |
#define FLUSH_BLOCK_ONLY(s, eof) { \ |
_tr_flush_block(s, (s->block_start >= 0L ? \ |
(charf *)&s->window[(unsigned)s->block_start] : \ |
(charf *)Z_NULL), \ |
(ulg)((long)s->strstart - s->block_start), \ |
(eof)); \ |
s->block_start = s->strstart; \ |
flush_pending(s->strm); \ |
Tracev((stderr,"[FLUSH]")); \ |
} |
/* Same but force premature exit if necessary. */ |
#define FLUSH_BLOCK(s, eof) { \ |
FLUSH_BLOCK_ONLY(s, eof); \ |
if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \ |
} |
/* =========================================================================== |
* Copy without compression as much as possible from the input stream, return |
* the current block state. |
* This function does not insert new strings in the dictionary since |
* uncompressible data is probably not useful. This function is used |
* only for the level=0 compression option. |
* NOTE: this function should be optimized to avoid extra copying from |
* window to pending_buf. |
*/ |
local block_state deflate_stored(s, flush) |
deflate_state *s; |
int flush; |
{ |
/* Stored blocks are limited to 0xffff bytes, pending_buf is limited |
* to pending_buf_size, and each stored block has a 5 byte header: |
*/ |
ulg max_block_size = 0xffff; |
ulg max_start; |
if (max_block_size > s->pending_buf_size - 5) { |
max_block_size = s->pending_buf_size - 5; |
} |
/* Copy as much as possible from input to output: */ |
for (;;) { |
/* Fill the window as much as possible: */ |
if (s->lookahead <= 1) { |
Assert(s->strstart < s->w_size+MAX_DIST(s) || |
s->block_start >= (long)s->w_size, "slide too late"); |
fill_window(s); |
if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; |
if (s->lookahead == 0) break; /* flush the current block */ |
} |
Assert(s->block_start >= 0L, "block gone"); |
s->strstart += s->lookahead; |
s->lookahead = 0; |
/* Emit a stored block if pending_buf will be full: */ |
max_start = s->block_start + max_block_size; |
if (s->strstart == 0 || (ulg)s->strstart >= max_start) { |
/* strstart == 0 is possible after wraparound on a 16-bit machine */ |
s->lookahead = (uInt)(s->strstart - max_start); |
s->strstart = (uInt)max_start; |
FLUSH_BLOCK(s, 0); |
} |
/* Flush if we may have to slide, otherwise block_start may become |
* negative and the data will be gone: |
*/ |
if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { |
FLUSH_BLOCK(s, 0); |
} |
} |
FLUSH_BLOCK(s, flush == Z_FINISH); |
return flush == Z_FINISH ? finish_done : block_done; |
} |
/* =========================================================================== |
* Compress as much as possible from the input stream, return the current |
* block state. |
* This function does not perform lazy evaluation of matches and inserts |
* new strings in the dictionary only for unmatched strings or for short |
* matches. It is used only for the fast compression options. |
*/ |
local block_state deflate_fast(s, flush) |
deflate_state *s; |
int flush; |
{ |
IPos hash_head = NIL; /* head of the hash chain */ |
int bflush; /* set if current block must be flushed */ |
for (;;) { |
/* Make sure that we always have enough lookahead, except |
* at the end of the input file. We need MAX_MATCH bytes |
* for the next match, plus MIN_MATCH bytes to insert the |
* string following the next match. |
*/ |
if (s->lookahead < MIN_LOOKAHEAD) { |
fill_window(s); |
if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { |
return need_more; |
} |
if (s->lookahead == 0) break; /* flush the current block */ |
} |
/* Insert the string window[strstart .. strstart+2] in the |
* dictionary, and set hash_head to the head of the hash chain: |
*/ |
if (s->lookahead >= MIN_MATCH) { |
INSERT_STRING(s, s->strstart, hash_head); |
} |
/* Find the longest match, discarding those <= prev_length. |
* At this point we have always match_length < MIN_MATCH |
*/ |
if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { |
/* To simplify the code, we prevent matches with the string |
* of window index 0 (in particular we have to avoid a match |
* of the string with itself at the start of the input file). |
*/ |
if (s->strategy != Z_HUFFMAN_ONLY) { |
s->match_length = longest_match (s, hash_head); |
} |
/* longest_match() sets match_start */ |
} |
if (s->match_length >= MIN_MATCH) { |
check_match(s, s->strstart, s->match_start, s->match_length); |
_tr_tally_dist(s, s->strstart - s->match_start, |
s->match_length - MIN_MATCH, bflush); |
s->lookahead -= s->match_length; |
/* Insert new strings in the hash table only if the match length |
* is not too large. This saves time but degrades compression. |
*/ |
#ifndef FASTEST |
if (s->match_length <= s->max_insert_length && |
s->lookahead >= MIN_MATCH) { |
s->match_length--; /* string at strstart already in hash table */ |
do { |
s->strstart++; |
INSERT_STRING(s, s->strstart, hash_head); |
/* strstart never exceeds WSIZE-MAX_MATCH, so there are |
* always MIN_MATCH bytes ahead. |
*/ |
} while (--s->match_length != 0); |
s->strstart++; |
} else |
#endif |
{ |
s->strstart += s->match_length; |
s->match_length = 0; |
s->ins_h = s->window[s->strstart]; |
UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); |
#if MIN_MATCH != 3 |
Call UPDATE_HASH() MIN_MATCH-3 more times |
#endif |
/* If lookahead < MIN_MATCH, ins_h is garbage, but it does not |
* matter since it will be recomputed at next deflate call. |
*/ |
} |
} else { |
/* No match, output a literal byte */ |
Tracevv((stderr,"%c", s->window[s->strstart])); |
_tr_tally_lit (s, s->window[s->strstart], bflush); |
s->lookahead--; |
s->strstart++; |
} |
if (bflush) FLUSH_BLOCK(s, 0); |
} |
FLUSH_BLOCK(s, flush == Z_FINISH); |
return flush == Z_FINISH ? finish_done : block_done; |
} |
/* =========================================================================== |
* Same as above, but achieves better compression. We use a lazy |
* evaluation for matches: a match is finally adopted only if there is |
* no better match at the next window position. |
*/ |
local block_state deflate_slow(s, flush) |
deflate_state *s; |
int flush; |
{ |
IPos hash_head = NIL; /* head of hash chain */ |
int bflush; /* set if current block must be flushed */ |
/* Process the input block. */ |
for (;;) { |
/* Make sure that we always have enough lookahead, except |
* at the end of the input file. We need MAX_MATCH bytes |
* for the next match, plus MIN_MATCH bytes to insert the |
* string following the next match. |
*/ |
if (s->lookahead < MIN_LOOKAHEAD) { |
fill_window(s); |
if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { |
return need_more; |
} |
if (s->lookahead == 0) break; /* flush the current block */ |
} |
/* Insert the string window[strstart .. strstart+2] in the |
* dictionary, and set hash_head to the head of the hash chain: |
*/ |
if (s->lookahead >= MIN_MATCH) { |
INSERT_STRING(s, s->strstart, hash_head); |
} |
/* Find the longest match, discarding those <= prev_length. |
*/ |
s->prev_length = s->match_length, s->prev_match = s->match_start; |
s->match_length = MIN_MATCH-1; |
if (hash_head != NIL && s->prev_length < s->max_lazy_match && |
s->strstart - hash_head <= MAX_DIST(s)) { |
/* To simplify the code, we prevent matches with the string |
* of window index 0 (in particular we have to avoid a match |
* of the string with itself at the start of the input file). |
*/ |
if (s->strategy != Z_HUFFMAN_ONLY) { |
s->match_length = longest_match (s, hash_head); |
} |
/* longest_match() sets match_start */ |
if (s->match_length <= 5 && (s->strategy == Z_FILTERED || |
(s->match_length == MIN_MATCH && |
s->strstart - s->match_start > TOO_FAR))) { |
/* If prev_match is also MIN_MATCH, match_start is garbage |
* but we will ignore the current match anyway. |
*/ |
s->match_length = MIN_MATCH-1; |
} |
} |
/* If there was a match at the previous step and the current |
* match is not better, output the previous match: |
*/ |
if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { |
uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; |
/* Do not insert strings in hash table beyond this. */ |
check_match(s, s->strstart-1, s->prev_match, s->prev_length); |
_tr_tally_dist(s, s->strstart -1 - s->prev_match, |
s->prev_length - MIN_MATCH, bflush); |
/* Insert in hash table all strings up to the end of the match. |
* strstart-1 and strstart are already inserted. If there is not |
* enough lookahead, the last two strings are not inserted in |
* the hash table. |
*/ |
s->lookahead -= s->prev_length-1; |
s->prev_length -= 2; |
do { |
if (++s->strstart <= max_insert) { |
INSERT_STRING(s, s->strstart, hash_head); |
} |
} while (--s->prev_length != 0); |
s->match_available = 0; |
s->match_length = MIN_MATCH-1; |
s->strstart++; |
if (bflush) FLUSH_BLOCK(s, 0); |
} else if (s->match_available) { |
/* If there was no match at the previous position, output a |
* single literal. If there was a match but the current match |
* is longer, truncate the previous match to a single literal. |
*/ |
Tracevv((stderr,"%c", s->window[s->strstart-1])); |
_tr_tally_lit(s, s->window[s->strstart-1], bflush); |
if (bflush) { |
FLUSH_BLOCK_ONLY(s, 0); |
} |
s->strstart++; |
s->lookahead--; |
if (s->strm->avail_out == 0) return need_more; |
} else { |
/* There is no previous match to compare with, wait for |
* the next step to decide. |
*/ |
s->match_available = 1; |
s->strstart++; |
s->lookahead--; |
} |
} |
Assert (flush != Z_NO_FLUSH, "no flush?"); |
if (s->match_available) { |
Tracevv((stderr,"%c", s->window[s->strstart-1])); |
_tr_tally_lit(s, s->window[s->strstart-1], bflush); |
s->match_available = 0; |
} |
FLUSH_BLOCK(s, flush == Z_FINISH); |
return flush == Z_FINISH ? finish_done : block_done; |
} |
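/* Editor's note: a hedged illustration of the lazy heuristic above (not from |
 * the zlib sources).  Suppose earlier data contains "abc" and "bcdefgh" and |
 * the current input is "abcdefgh".  At 'a' the best match has length 3; at |
 * the next position 'b' it has length 7.  Since the later match is longer, |
 * deflate_slow() truncates the first one to the literal 'a' and then codes |
 * the 7-byte match, which normally costs fewer bits than the greedy choice. |
 */ |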
/shark/trunk/ports/png/zutil.c |
---|
0,0 → 1,225 |
/* zutil.c -- target dependent utility functions for the compression library |
* Copyright (C) 1995-2002 Jean-loup Gailly. |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* @(#) $Id: zutil.c,v 1.1 2003-03-20 13:08:13 giacomo Exp $ */ |
#include "zutil.h" |
struct internal_state {int dummy;}; /* for buggy compilers */ |
#ifndef STDC |
extern void exit OF((int)); |
#endif |
const char *z_errmsg[10] = { |
"need dictionary", /* Z_NEED_DICT 2 */ |
"stream end", /* Z_STREAM_END 1 */ |
"", /* Z_OK 0 */ |
"file error", /* Z_ERRNO (-1) */ |
"stream error", /* Z_STREAM_ERROR (-2) */ |
"data error", /* Z_DATA_ERROR (-3) */ |
"insufficient memory", /* Z_MEM_ERROR (-4) */ |
"buffer error", /* Z_BUF_ERROR (-5) */ |
"incompatible version",/* Z_VERSION_ERROR (-6) */ |
""}; |
const char * ZEXPORT zlibVersion() |
{ |
return ZLIB_VERSION; |
} |
#ifdef DEBUG |
# ifndef verbose |
# define verbose 0 |
# endif |
int z_verbose = verbose; |
void z_error (m) |
char *m; |
{ |
cprintf("%s\n", m); |
exit(1); |
} |
#endif |
/* exported to allow conversion of error code to string for compress() and |
* uncompress() |
*/ |
const char * ZEXPORT zError(err) |
int err; |
{ |
return ERR_MSG(err); |
} |
#ifndef HAVE_MEMCPY |
void zmemcpy(dest, source, len) |
Bytef* dest; |
const Bytef* source; |
uInt len; |
{ |
if (len == 0) return; |
do { |
*dest++ = *source++; /* ??? to be unrolled */ |
} while (--len != 0); |
} |
int zmemcmp(s1, s2, len) |
const Bytef* s1; |
const Bytef* s2; |
uInt len; |
{ |
uInt j; |
for (j = 0; j < len; j++) { |
if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; |
} |
return 0; |
} |
void zmemzero(dest, len) |
Bytef* dest; |
uInt len; |
{ |
if (len == 0) return; |
do { |
*dest++ = 0; /* ??? to be unrolled */ |
} while (--len != 0); |
} |
#endif |
#ifdef __TURBOC__ |
#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__) |
/* Small and medium models in Turbo C are for now limited to near allocation |
* with reduced MAX_WBITS and MAX_MEM_LEVEL |
*/ |
# define MY_ZCALLOC |
/* Turbo C malloc() does not allow dynamic allocation of 64K bytes |
* and farmalloc(64K) returns a pointer with an offset of 8, so we |
* must fix the pointer. Warning: the pointer must be put back to its |
* original form in order to free it, use zcfree(). |
*/ |
#define MAX_PTR 10 |
/* 10*64K = 640K */ |
local int next_ptr = 0; |
typedef struct ptr_table_s { |
voidpf org_ptr; |
voidpf new_ptr; |
} ptr_table; |
local ptr_table table[MAX_PTR]; |
/* This table is used to remember the original form of pointers |
* to large buffers (64K). Such pointers are normalized with a zero offset. |
* Since MSDOS is not a preemptive multitasking OS, this table is not |
* protected from concurrent access. This hack doesn't work anyway on |
* a protected system like OS/2. Use Microsoft C instead. |
*/ |
voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) |
{ |
voidpf buf = opaque; /* just to make some compilers happy */ |
ulg bsize = (ulg)items*size; |
/* If we allocate less than 65520 bytes, we assume that farmalloc |
* will return a usable pointer which doesn't have to be normalized. |
*/ |
if (bsize < 65520L) { |
buf = farmalloc(bsize); |
if (*(ush*)&buf != 0) return buf; |
} else { |
buf = farmalloc(bsize + 16L); |
} |
if (buf == NULL || next_ptr >= MAX_PTR) return NULL; |
table[next_ptr].org_ptr = buf; |
/* Normalize the pointer to seg:0 */ |
*((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; |
*(ush*)&buf = 0; |
table[next_ptr++].new_ptr = buf; |
return buf; |
} |
void zcfree (voidpf opaque, voidpf ptr) |
{ |
int n; |
if (*(ush*)&ptr != 0) { /* object < 64K */ |
farfree(ptr); |
return; |
} |
/* Find the original pointer */ |
for (n = 0; n < next_ptr; n++) { |
if (ptr != table[n].new_ptr) continue; |
farfree(table[n].org_ptr); |
while (++n < next_ptr) { |
table[n-1] = table[n]; |
} |
next_ptr--; |
return; |
} |
ptr = opaque; /* just to make some compilers happy */ |
Assert(0, "zcfree: ptr not found"); |
} |
#endif |
#endif /* __TURBOC__ */ |
#if defined(M_I86) && !defined(__32BIT__) |
/* Microsoft C in 16-bit mode */ |
# define MY_ZCALLOC |
#if (!defined(_MSC_VER) || (_MSC_VER <= 600)) |
# define _halloc halloc |
# define _hfree hfree |
#endif |
voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) |
{ |
if (opaque) opaque = 0; /* to make compiler happy */ |
return _halloc((long)items, size); |
} |
void zcfree (voidpf opaque, voidpf ptr) |
{ |
if (opaque) opaque = 0; /* to make compiler happy */ |
_hfree(ptr); |
} |
#endif /* MSC */ |
#ifndef MY_ZCALLOC /* Any system without a special alloc function */ |
#ifndef STDC |
extern voidp calloc OF((uInt items, uInt size)); |
extern void free OF((voidpf ptr)); |
#endif |
voidpf zcalloc (opaque, items, size) |
voidpf opaque; |
unsigned items; |
unsigned size; |
{ |
if (opaque) items += size - size; /* make compiler happy */ |
return (voidpf)calloc(items, size); |
} |
void zcfree (opaque, ptr) |
voidpf opaque; |
voidpf ptr; |
{ |
free(ptr); |
if (opaque) return; /* make compiler happy */ |
} |
#endif /* MY_ZCALLOC */ |
/shark/trunk/ports/png/gzio.c |
---|
0,0 → 1,875 |
/* gzio.c -- IO on .gz files |
* Copyright (C) 1995-2002 Jean-loup Gailly. |
* For conditions of distribution and use, see copyright notice in zlib.h |
* |
* Compile this file with -DNO_DEFLATE to avoid the compression code. |
*/ |
/* @(#) $Id: gzio.c,v 1.1 2003-03-20 13:08:10 giacomo Exp $ */ |
#include <stdio.h> |
#include "zutil.h" |
struct internal_state {int dummy;}; /* for buggy compilers */ |
#ifndef Z_BUFSIZE |
# ifdef MAXSEG_64K |
# define Z_BUFSIZE 4096 /* minimize memory usage for 16-bit DOS */ |
# else |
# define Z_BUFSIZE 16384 |
# endif |
#endif |
#ifndef Z_PRINTF_BUFSIZE |
# define Z_PRINTF_BUFSIZE 4096 |
#endif |
#define ALLOC(size) malloc(size) |
#define TRYFREE(p) {if (p) free(p);} |
static int gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */ |
/* gzip flag byte */ |
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ |
#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */ |
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ |
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ |
#define COMMENT 0x10 /* bit 4 set: file comment present */ |
#define RESERVED 0xE0 /* bits 5..7: reserved */ |
typedef struct gz_stream { |
z_stream stream; |
int z_err; /* error code for last stream operation */ |
int z_eof; /* set if end of input file */ |
FILE *file; /* .gz file */ |
Byte *inbuf; /* input buffer */ |
Byte *outbuf; /* output buffer */ |
uLong crc; /* crc32 of uncompressed data */ |
char *msg; /* error message */ |
char *path; /* path name for debugging only */ |
int transparent; /* 1 if input file is not a .gz file */ |
char mode; /* 'w' or 'r' */ |
long startpos; /* start of compressed data in file (header skipped) */ |
} gz_stream; |
local gzFile gz_open OF((const char *path, const char *mode, int fd)); |
local int do_flush OF((gzFile file, int flush)); |
local int get_byte OF((gz_stream *s)); |
local void check_header OF((gz_stream *s)); |
local int destroy OF((gz_stream *s)); |
local void putLong OF((FILE *file, uLong x)); |
local uLong getLong OF((gz_stream *s)); |
/* =========================================================================== |
Opens a gzip (.gz) file for reading or writing. The mode parameter |
is as in fopen ("rb" or "wb"). The file is given either by file descriptor |
or path name (if fd == -1). |
gz_open returns NULL if the file could not be opened or if there was |
insufficient memory to allocate the (de)compression state; errno |
can be checked to distinguish the two cases (if errno is zero, the |
zlib error is Z_MEM_ERROR). |
*/ |
local gzFile gz_open (path, mode, fd) |
const char *path; |
const char *mode; |
int fd; |
{ |
int err; |
int level = Z_DEFAULT_COMPRESSION; /* compression level */ |
int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */ |
char *p = (char*)mode; |
gz_stream *s; |
char fmode[80]; /* copy of mode, without the compression level */ |
char *m = fmode; |
if (!path || !mode) return Z_NULL; |
s = (gz_stream *)ALLOC(sizeof(gz_stream)); |
if (!s) return Z_NULL; |
s->stream.zalloc = (alloc_func)0; |
s->stream.zfree = (free_func)0; |
s->stream.opaque = (voidpf)0; |
s->stream.next_in = s->inbuf = Z_NULL; |
s->stream.next_out = s->outbuf = Z_NULL; |
s->stream.avail_in = s->stream.avail_out = 0; |
s->file = NULL; |
s->z_err = Z_OK; |
s->z_eof = 0; |
s->crc = crc32(0L, Z_NULL, 0); |
s->msg = NULL; |
s->transparent = 0; |
s->path = (char*)ALLOC(strlen(path)+1); |
if (s->path == NULL) { |
return destroy(s), (gzFile)Z_NULL; |
} |
strcpy(s->path, path); /* do this early for debugging */ |
s->mode = '\0'; |
do { |
if (*p == 'r') s->mode = 'r'; |
if (*p == 'w' || *p == 'a') s->mode = 'w'; |
if (*p >= '0' && *p <= '9') { |
level = *p - '0'; |
} else if (*p == 'f') { |
strategy = Z_FILTERED; |
} else if (*p == 'h') { |
strategy = Z_HUFFMAN_ONLY; |
} else { |
*m++ = *p; /* copy the mode */ |
} |
} while (*p++ && m != fmode + sizeof(fmode)); |
if (s->mode == '\0') return destroy(s), (gzFile)Z_NULL; |
if (s->mode == 'w') { |
#ifdef NO_DEFLATE |
err = Z_STREAM_ERROR; |
#else |
err = deflateInit2(&(s->stream), level, |
Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy); |
/* windowBits is passed < 0 to suppress zlib header */ |
s->stream.next_out = s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); |
#endif |
if (err != Z_OK || s->outbuf == Z_NULL) { |
return destroy(s), (gzFile)Z_NULL; |
} |
} else { |
s->stream.next_in = s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); |
err = inflateInit2(&(s->stream), -MAX_WBITS); |
/* windowBits is passed < 0 to tell that there is no zlib header. |
* Note that in this case inflate *requires* an extra "dummy" byte |
* after the compressed stream in order to complete decompression and |
* return Z_STREAM_END. Here the gzip CRC32 ensures that 4 bytes are |
* present after the compressed stream. |
*/ |
if (err != Z_OK || s->inbuf == Z_NULL) { |
return destroy(s), (gzFile)Z_NULL; |
} |
} |
s->stream.avail_out = Z_BUFSIZE; |
errno = 0; |
s->file = fd < 0 ? F_OPEN(path, fmode) : (FILE*)fdopen(fd, fmode); |
if (s->file == NULL) { |
return destroy(s), (gzFile)Z_NULL; |
} |
if (s->mode == 'w') { |
/* Write a very simple .gz header: |
*/ |
fprintf(s->file, "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], gz_magic[1], |
Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, 0 /*xflags*/, OS_CODE); |
s->startpos = 10L; |
/* We use 10L instead of ftell(s->file) because ftell causes an |
* fflush on some systems. This version of the library doesn't use |
* startpos anyway in write mode, so this initialization is not |
* necessary. |
*/ |
} else { |
check_header(s); /* skip the .gz header */ |
s->startpos = (ftell(s->file) - s->stream.avail_in); |
} |
return (gzFile)s; |
} |
/* =========================================================================== |
Opens a gzip (.gz) file for reading or writing. |
*/ |
gzFile ZEXPORT gzopen (path, mode) |
const char *path; |
const char *mode; |
{ |
return gz_open (path, mode, -1); |
} |
/* =========================================================================== |
Associate a gzFile with the file descriptor fd. fd is not dup'ed here |
to mimic the behavio(u)r of fdopen. |
*/ |
gzFile ZEXPORT gzdopen (fd, mode) |
int fd; |
const char *mode; |
{ |
char name[20]; |
if (fd < 0) return (gzFile)Z_NULL; |
sprintf(name, "<fd:%d>", fd); /* for debugging */ |
return gz_open (name, mode, fd); |
} |
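/* Illustrative usage sketch (not part of the original port): a minimal |
 * reader built on gzopen/gzread/gzclose as defined in this file.  The |
 * file name "test.gz" and the buffer size are arbitrary assumptions; |
 * the block is disabled with #if 0 so it does not affect the build. |
 */ |
#if 0 |
static void example_dump_gz(void) |
{ |
char buf[256]; |
int n; |
gzFile in = gzopen("test.gz", "rb"); /* mode string as in fopen */ |
if (in == Z_NULL) return; |
while ((n = gzread(in, buf, sizeof(buf))) > 0) { |
fwrite(buf, 1, (size_t)n, stdout); /* copy the uncompressed bytes */ |
} |
gzclose(in); |
} |
#endif |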
/* =========================================================================== |
* Update the compression level and strategy |
*/ |
int ZEXPORT gzsetparams (file, level, strategy) |
gzFile file; |
int level; |
int strategy; |
{ |
gz_stream *s = (gz_stream*)file; |
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; |
/* Make room to allow flushing */ |
if (s->stream.avail_out == 0) { |
s->stream.next_out = s->outbuf; |
if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) { |
s->z_err = Z_ERRNO; |
} |
s->stream.avail_out = Z_BUFSIZE; |
} |
return deflateParams (&(s->stream), level, strategy); |
} |
/* =========================================================================== |
Read a byte from a gz_stream; update next_in and avail_in. Return EOF |
for end of file. |
IN assertion: the stream s has been successfully opened for reading. |
*/ |
local int get_byte(s) |
gz_stream *s; |
{ |
if (s->z_eof) return EOF; |
if (s->stream.avail_in == 0) { |
errno = 0; |
s->stream.avail_in = fread(s->inbuf, 1, Z_BUFSIZE, s->file); |
if (s->stream.avail_in == 0) { |
s->z_eof = 1; |
if (ferror(s->file)) s->z_err = Z_ERRNO; |
return EOF; |
} |
s->stream.next_in = s->inbuf; |
} |
s->stream.avail_in--; |
return *(s->stream.next_in)++; |
} |
/* =========================================================================== |
Check the gzip header of a gz_stream opened for reading. Set the stream |
mode to transparent if the gzip magic header is not present; set s->z_err |
to Z_DATA_ERROR if the magic header is present but the rest of the header |
is incorrect. |
IN assertion: the stream s has already been created successfully; |
s->stream.avail_in is zero for the first time, but may be non-zero |
for concatenated .gz files. |
*/ |
local void check_header(s) |
gz_stream *s; |
{ |
int method; /* method byte */ |
int flags; /* flags byte */ |
uInt len; |
int c; |
/* Check the gzip magic header */ |
for (len = 0; len < 2; len++) { |
c = get_byte(s); |
if (c != gz_magic[len]) { |
if (len != 0) s->stream.avail_in++, s->stream.next_in--; |
if (c != EOF) { |
s->stream.avail_in++, s->stream.next_in--; |
s->transparent = 1; |
} |
s->z_err = s->stream.avail_in != 0 ? Z_OK : Z_STREAM_END; |
return; |
} |
} |
method = get_byte(s); |
flags = get_byte(s); |
if (method != Z_DEFLATED || (flags & RESERVED) != 0) { |
s->z_err = Z_DATA_ERROR; |
return; |
} |
/* Discard time, xflags and OS code: */ |
for (len = 0; len < 6; len++) (void)get_byte(s); |
if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */ |
len = (uInt)get_byte(s); |
len += ((uInt)get_byte(s))<<8; |
/* len is garbage if EOF but the loop below will quit anyway */ |
while (len-- != 0 && get_byte(s) != EOF) ; |
} |
if ((flags & ORIG_NAME) != 0) { /* skip the original file name */ |
while ((c = get_byte(s)) != 0 && c != EOF) ; |
} |
if ((flags & COMMENT) != 0) { /* skip the .gz file comment */ |
while ((c = get_byte(s)) != 0 && c != EOF) ; |
} |
if ((flags & HEAD_CRC) != 0) { /* skip the header crc */ |
for (len = 0; len < 2; len++) (void)get_byte(s); |
} |
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; |
} |
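/* For reference (added comment, not in the original source): the fixed |
 * part of the header parsed above is 10 bytes: the two magic bytes |
 * 0x1f 0x8b, the method byte (8 = Z_DEFLATED), the flag byte, a 4-byte |
 * little-endian modification time, one byte of extra flags and one OS |
 * code byte.  The optional extra field, file name, comment and header |
 * CRC follow only when the corresponding flag bits are set, in exactly |
 * the order they are skipped above. |
 */ |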
/* =========================================================================== |
* Cleanup then free the given gz_stream. Return a zlib error code. |
Try freeing in the reverse order of allocations. |
*/ |
local int destroy (s) |
gz_stream *s; |
{ |
int err = Z_OK; |
if (!s) return Z_STREAM_ERROR; |
TRYFREE(s->msg); |
if (s->stream.state != NULL) { |
if (s->mode == 'w') { |
#ifdef NO_DEFLATE |
err = Z_STREAM_ERROR; |
#else |
err = deflateEnd(&(s->stream)); |
#endif |
} else if (s->mode == 'r') { |
err = inflateEnd(&(s->stream)); |
} |
} |
if (s->file != NULL && fclose(s->file)) { |
#ifdef ESPIPE |
if (errno != ESPIPE) /* fclose is broken for pipes in HP/UX */ |
#endif |
err = Z_ERRNO; |
} |
if (s->z_err < 0) err = s->z_err; |
TRYFREE(s->inbuf); |
TRYFREE(s->outbuf); |
TRYFREE(s->path); |
TRYFREE(s); |
return err; |
} |
/* =========================================================================== |
Reads the given number of uncompressed bytes from the compressed file. |
gzread returns the number of bytes actually read (0 for end of file). |
*/ |
int ZEXPORT gzread (file, buf, len) |
gzFile file; |
voidp buf; |
unsigned len; |
{ |
gz_stream *s = (gz_stream*)file; |
Bytef *start = (Bytef*)buf; /* starting point for crc computation */ |
Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */ |
if (s == NULL || s->mode != 'r') return Z_STREAM_ERROR; |
if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1; |
if (s->z_err == Z_STREAM_END) return 0; /* EOF */ |
next_out = (Byte*)buf; |
s->stream.next_out = (Bytef*)buf; |
s->stream.avail_out = len; |
while (s->stream.avail_out != 0) { |
if (s->transparent) { |
/* Copy first the lookahead bytes: */ |
uInt n = s->stream.avail_in; |
if (n > s->stream.avail_out) n = s->stream.avail_out; |
if (n > 0) { |
zmemcpy(s->stream.next_out, s->stream.next_in, n); |
next_out += n; |
s->stream.next_out = next_out; |
s->stream.next_in += n; |
s->stream.avail_out -= n; |
s->stream.avail_in -= n; |
} |
if (s->stream.avail_out > 0) { |
s->stream.avail_out -= fread(next_out, 1, s->stream.avail_out, |
s->file); |
} |
len -= s->stream.avail_out; |
s->stream.total_in += (uLong)len; |
s->stream.total_out += (uLong)len; |
if (len == 0) s->z_eof = 1; |
return (int)len; |
} |
if (s->stream.avail_in == 0 && !s->z_eof) { |
errno = 0; |
s->stream.avail_in = fread(s->inbuf, 1, Z_BUFSIZE, s->file); |
if (s->stream.avail_in == 0) { |
s->z_eof = 1; |
if (ferror(s->file)) { |
s->z_err = Z_ERRNO; |
break; |
} |
} |
s->stream.next_in = s->inbuf; |
} |
s->z_err = inflate(&(s->stream), Z_NO_FLUSH); |
if (s->z_err == Z_STREAM_END) { |
/* Check CRC and original size */ |
s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start)); |
start = s->stream.next_out; |
if (getLong(s) != s->crc) { |
s->z_err = Z_DATA_ERROR; |
} else { |
(void)getLong(s); |
/* The uncompressed length returned above by getLong() may |
* be different from s->stream.total_out in case of |
* concatenated .gz files. Check for such files: |
*/ |
check_header(s); |
if (s->z_err == Z_OK) { |
uLong total_in = s->stream.total_in; |
uLong total_out = s->stream.total_out; |
inflateReset(&(s->stream)); |
s->stream.total_in = total_in; |
s->stream.total_out = total_out; |
s->crc = crc32(0L, Z_NULL, 0); |
} |
} |
} |
if (s->z_err != Z_OK || s->z_eof) break; |
} |
s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start)); |
return (int)(len - s->stream.avail_out); |
} |
/* =========================================================================== |
Reads one byte from the compressed file. gzgetc returns this byte |
or -1 in case of end of file or error. |
*/ |
int ZEXPORT gzgetc(file) |
gzFile file; |
{ |
unsigned char c; |
return gzread(file, &c, 1) == 1 ? c : -1; |
} |
/* =========================================================================== |
Reads bytes from the compressed file until len-1 characters are |
read, or a newline character is read and transferred to buf, or an |
end-of-file condition is encountered. The string is then terminated |
with a null character. |
gzgets returns buf, or Z_NULL in case of error. |
The current implementation is not optimized at all. |
*/ |
char * ZEXPORT gzgets(file, buf, len) |
gzFile file; |
char *buf; |
int len; |
{ |
char *b = buf; |
if (buf == Z_NULL || len <= 0) return Z_NULL; |
while (--len > 0 && gzread(file, buf, 1) == 1 && *buf++ != '\n') ; |
*buf = '\0'; |
return b == buf && len > 0 ? Z_NULL : b; |
} |
#ifndef NO_DEFLATE |
/* =========================================================================== |
Writes the given number of uncompressed bytes into the compressed file. |
gzwrite returns the number of bytes actually written (0 in case of error). |
*/ |
int ZEXPORT gzwrite (file, buf, len) |
gzFile file; |
const voidp buf; |
unsigned len; |
{ |
gz_stream *s = (gz_stream*)file; |
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; |
s->stream.next_in = (Bytef*)buf; |
s->stream.avail_in = len; |
while (s->stream.avail_in != 0) { |
if (s->stream.avail_out == 0) { |
s->stream.next_out = s->outbuf; |
if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) { |
s->z_err = Z_ERRNO; |
break; |
} |
s->stream.avail_out = Z_BUFSIZE; |
} |
s->z_err = deflate(&(s->stream), Z_NO_FLUSH); |
if (s->z_err != Z_OK) break; |
} |
s->crc = crc32(s->crc, (const Bytef *)buf, len); |
return (int)(len - s->stream.avail_in); |
} |
/* =========================================================================== |
Converts, formats, and writes the args to the compressed file under |
control of the format string, as in fprintf. gzprintf returns the number of |
uncompressed bytes actually written (0 in case of error). |
*/ |
#ifdef STDC |
#include <stdarg.h> |
int ZEXPORTVA gzprintf (gzFile file, const char *format, /* args */ ...) |
{ |
char buf[Z_PRINTF_BUFSIZE]; |
va_list va; |
int len; |
va_start(va, format); |
#ifdef HAS_vsnprintf |
(void)vsnprintf(buf, sizeof(buf), format, va); |
#else |
(void)vsprintf(buf, format, va); |
#endif |
va_end(va); |
len = strlen(buf); /* some *sprintf don't return the nb of bytes written */ |
if (len <= 0) return 0; |
return gzwrite(file, buf, (unsigned)len); |
} |
#else /* not ANSI C */ |
int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, |
a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) |
gzFile file; |
const char *format; |
int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, |
a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; |
{ |
char buf[Z_PRINTF_BUFSIZE]; |
int len; |
#ifdef HAS_snprintf |
snprintf(buf, sizeof(buf), format, a1, a2, a3, a4, a5, a6, a7, a8, |
a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); |
#else |
sprintf(buf, format, a1, a2, a3, a4, a5, a6, a7, a8, |
a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); |
#endif |
len = strlen(buf); /* old sprintf doesn't return the nb of bytes written */ |
if (len <= 0) return 0; |
return gzwrite(file, buf, len); |
} |
#endif |
/* =========================================================================== |
Writes c, converted to an unsigned char, into the compressed file. |
gzputc returns the value that was written, or -1 in case of error. |
*/ |
int ZEXPORT gzputc(file, c) |
gzFile file; |
int c; |
{ |
unsigned char cc = (unsigned char) c; /* required for big endian systems */ |
return gzwrite(file, &cc, 1) == 1 ? (int)cc : -1; |
} |
/* =========================================================================== |
Writes the given null-terminated string to the compressed file, excluding |
the terminating null character. |
gzputs returns the number of characters written, or -1 in case of error. |
*/ |
int ZEXPORT gzputs(file, s) |
gzFile file; |
const char *s; |
{ |
return gzwrite(file, (char*)s, (unsigned)strlen(s)); |
} |
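/* Illustrative usage sketch (not part of the original port): writing a |
 * small .gz file with the routines above.  The output name "out.gz" and |
 * the compression digit "9" in the mode string are arbitrary choices; |
 * the block is disabled with #if 0 so it does not affect the build. |
 */ |
#if 0 |
static void example_write_gz(void) |
{ |
static const char msg[] = "hello, gzip world\n"; |
gzFile out = gzopen("out.gz", "wb9"); /* write mode, best compression */ |
if (out == Z_NULL) return; |
(void)gzputs(out, msg); /* compresses through gzwrite() */ |
gzclose(out); /* flushes and appends CRC32 + length */ |
} |
#endif |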
/* =========================================================================== |
Flushes all pending output into the compressed file. The parameter |
flush is as in the deflate() function. |
*/ |
local int do_flush (file, flush) |
gzFile file; |
int flush; |
{ |
uInt len; |
int done = 0; |
gz_stream *s = (gz_stream*)file; |
if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; |
s->stream.avail_in = 0; /* should be zero already anyway */ |
for (;;) { |
len = Z_BUFSIZE - s->stream.avail_out; |
if (len != 0) { |
if ((uInt)fwrite(s->outbuf, 1, len, s->file) != len) { |
s->z_err = Z_ERRNO; |
return Z_ERRNO; |
} |
s->stream.next_out = s->outbuf; |
s->stream.avail_out = Z_BUFSIZE; |
} |
if (done) break; |
s->z_err = deflate(&(s->stream), flush); |
/* Ignore the second of two consecutive flushes: */ |
if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK; |
/* deflate has finished flushing only when it hasn't used up |
* all the available space in the output buffer: |
*/ |
done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END); |
if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break; |
} |
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; |
} |
int ZEXPORT gzflush (file, flush) |
gzFile file; |
int flush; |
{ |
gz_stream *s = (gz_stream*)file; |
int err = do_flush (file, flush); |
if (err) return err; |
fflush(s->file); |
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; |
} |
#endif /* NO_DEFLATE */ |
/* =========================================================================== |
Sets the starting position for the next gzread or gzwrite on the given |
compressed file. The offset represents a number of bytes in the |
uncompressed data stream. |
gzseek returns the resulting offset location as measured in bytes from |
the beginning of the uncompressed stream, or -1 in case of error. |
SEEK_END is not implemented; it returns an error. |
In this version of the library, gzseek can be extremely slow. |
*/ |
z_off_t ZEXPORT gzseek (file, offset, whence) |
gzFile file; |
z_off_t offset; |
int whence; |
{ |
gz_stream *s = (gz_stream*)file; |
if (s == NULL || whence == SEEK_END || |
s->z_err == Z_ERRNO || s->z_err == Z_DATA_ERROR) { |
return -1L; |
} |
if (s->mode == 'w') { |
#ifdef NO_DEFLATE |
return -1L; |
#else |
if (whence == SEEK_SET) { |
offset -= s->stream.total_in; |
} |
if (offset < 0) return -1L; |
/* At this point, offset is the number of zero bytes to write. */ |
if (s->inbuf == Z_NULL) { |
s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); /* for seeking */ |
zmemzero(s->inbuf, Z_BUFSIZE); |
} |
while (offset > 0) { |
uInt size = Z_BUFSIZE; |
if (offset < Z_BUFSIZE) size = (uInt)offset; |
size = gzwrite(file, s->inbuf, size); |
if (size == 0) return -1L; |
offset -= size; |
} |
return (z_off_t)s->stream.total_in; |
#endif |
} |
/* Rest of function is for reading only */ |
/* compute absolute position */ |
if (whence == SEEK_CUR) { |
offset += s->stream.total_out; |
} |
if (offset < 0) return -1L; |
if (s->transparent) { |
/* map to fseek */ |
s->stream.avail_in = 0; |
s->stream.next_in = s->inbuf; |
if (fseek(s->file, offset, SEEK_SET) < 0) return -1L; |
s->stream.total_in = s->stream.total_out = (uLong)offset; |
return offset; |
} |
/* For a negative seek, rewind and use positive seek */ |
if ((uLong)offset >= s->stream.total_out) { |
offset -= s->stream.total_out; |
} else if (gzrewind(file) < 0) { |
return -1L; |
} |
/* offset is now the number of bytes to skip. */ |
if (offset != 0 && s->outbuf == Z_NULL) { |
s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); |
} |
while (offset > 0) { |
int size = Z_BUFSIZE; |
if (offset < Z_BUFSIZE) size = (int)offset; |
size = gzread(file, s->outbuf, (uInt)size); |
if (size <= 0) return -1L; |
offset -= size; |
} |
return (z_off_t)s->stream.total_out; |
} |
/* =========================================================================== |
Rewinds input file. |
*/ |
int ZEXPORT gzrewind (file) |
gzFile file; |
{ |
gz_stream *s = (gz_stream*)file; |
if (s == NULL || s->mode != 'r') return -1; |
s->z_err = Z_OK; |
s->z_eof = 0; |
s->stream.avail_in = 0; |
s->stream.next_in = s->inbuf; |
s->crc = crc32(0L, Z_NULL, 0); |
if (s->startpos == 0) { /* not a compressed file */ |
rewind(s->file); |
return 0; |
} |
(void) inflateReset(&s->stream); |
return fseek(s->file, s->startpos, SEEK_SET); |
} |
/* =========================================================================== |
Returns the starting position for the next gzread or gzwrite on the |
given compressed file. This position represents a number of bytes in the |
uncompressed data stream. |
*/ |
z_off_t ZEXPORT gztell (file) |
gzFile file; |
{ |
return gzseek(file, 0L, SEEK_CUR); |
} |
/* =========================================================================== |
Returns 1 when EOF has previously been detected reading the given |
input stream, otherwise zero. |
*/ |
int ZEXPORT gzeof (file) |
gzFile file; |
{ |
gz_stream *s = (gz_stream*)file; |
return (s == NULL || s->mode != 'r') ? 0 : s->z_eof; |
} |
/* =========================================================================== |
Outputs a long in LSB order to the given file |
*/ |
local void putLong (file, x) |
FILE *file; |
uLong x; |
{ |
int n; |
for (n = 0; n < 4; n++) { |
fputc((int)(x & 0xff), file); |
x >>= 8; |
} |
} |
/* =========================================================================== |
Reads a long in LSB order from the given gz_stream. Sets z_err in case |
of error. |
*/ |
local uLong getLong (s) |
gz_stream *s; |
{ |
uLong x = (uLong)get_byte(s); |
int c; |
x += ((uLong)get_byte(s))<<8; |
x += ((uLong)get_byte(s))<<16; |
c = get_byte(s); |
if (c == EOF) s->z_err = Z_DATA_ERROR; |
x += ((uLong)c)<<24; |
return x; |
} |
/* =========================================================================== |
Flushes all pending output if necessary, closes the compressed file |
and deallocates all the (de)compression state. |
*/ |
int ZEXPORT gzclose (file) |
gzFile file; |
{ |
int err; |
gz_stream *s = (gz_stream*)file; |
if (s == NULL) return Z_STREAM_ERROR; |
if (s->mode == 'w') { |
#ifdef NO_DEFLATE |
return Z_STREAM_ERROR; |
#else |
err = do_flush (file, Z_FINISH); |
if (err != Z_OK) return destroy((gz_stream*)file); |
putLong (s->file, s->crc); |
putLong (s->file, s->stream.total_in); |
#endif |
} |
return destroy((gz_stream*)file); |
} |
/* =========================================================================== |
Returns the error message for the last error which occurred on the |
given compressed file. errnum is set to the zlib error number. If an |
error occurred in the file system and not in the compression library, |
errnum is set to Z_ERRNO and the application may consult errno |
to get the exact error code. |
*/ |
const char* ZEXPORT gzerror (file, errnum) |
gzFile file; |
int *errnum; |
{ |
char *m; |
gz_stream *s = (gz_stream*)file; |
if (s == NULL) { |
*errnum = Z_STREAM_ERROR; |
return (const char*)ERR_MSG(Z_STREAM_ERROR); |
} |
*errnum = s->z_err; |
if (*errnum == Z_OK) return (const char*)""; |
m = (char*)(*errnum == Z_ERRNO ? zstrerror(errno) : s->stream.msg); |
if (m == NULL || *m == '\0') m = (char*)ERR_MSG(s->z_err); |
TRYFREE(s->msg); |
s->msg = (char*)ALLOC(strlen(s->path) + strlen(m) + 3); |
strcpy(s->msg, s->path); |
strcat(s->msg, ": "); |
strcat(s->msg, m); |
return (const char*)s->msg; |
} |
/shark/trunk/ports/png/deflate.h |
---|
0,0 → 1,318 |
/* deflate.h -- internal compression state |
* Copyright (C) 1995-2002 Jean-loup Gailly |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* WARNING: this file should *not* be used by applications. It is |
part of the implementation of the compression library and is |
subject to change. Applications should only use zlib.h. |
*/ |
/* @(#) $Id: deflate.h,v 1.1 2003-03-20 13:08:10 giacomo Exp $ */ |
#ifndef _DEFLATE_H |
#define _DEFLATE_H |
#include "zutil.h" |
/* =========================================================================== |
* Internal compression state. |
*/ |
#define LENGTH_CODES 29 |
/* number of length codes, not counting the special END_BLOCK code */ |
#define LITERALS 256 |
/* number of literal bytes 0..255 */ |
#define L_CODES (LITERALS+1+LENGTH_CODES) |
/* number of Literal or Length codes, including the END_BLOCK code */ |
#define D_CODES 30 |
/* number of distance codes */ |
#define BL_CODES 19 |
/* number of codes used to transfer the bit lengths */ |
#define HEAP_SIZE (2*L_CODES+1) |
/* maximum heap size */ |
#define MAX_BITS 15 |
/* All codes must not exceed MAX_BITS bits */ |
#define INIT_STATE 42 |
#define BUSY_STATE 113 |
#define FINISH_STATE 666 |
/* Stream status */ |
/* Data structure describing a single value and its code string. */ |
typedef struct ct_data_s { |
union { |
ush freq; /* frequency count */ |
ush code; /* bit string */ |
} fc; |
union { |
ush dad; /* father node in Huffman tree */ |
ush len; /* length of bit string */ |
} dl; |
} FAR ct_data; |
#define Freq fc.freq |
#define Code fc.code |
#define Dad dl.dad |
#define Len dl.len |
typedef struct static_tree_desc_s static_tree_desc; |
typedef struct tree_desc_s { |
ct_data *dyn_tree; /* the dynamic tree */ |
int max_code; /* largest code with non zero frequency */ |
static_tree_desc *stat_desc; /* the corresponding static tree */ |
} FAR tree_desc; |
typedef ush Pos; |
typedef Pos FAR Posf; |
typedef unsigned IPos; |
/* A Pos is an index in the character window. We use short instead of int to |
* save space in the various tables. IPos is used only for parameter passing. |
*/ |
typedef struct internal_state { |
z_streamp strm; /* pointer back to this zlib stream */ |
int status; /* as the name implies */ |
Bytef *pending_buf; /* output still pending */ |
ulg pending_buf_size; /* size of pending_buf */ |
Bytef *pending_out; /* next pending byte to output to the stream */ |
int pending; /* nb of bytes in the pending buffer */ |
int noheader; /* suppress zlib header and adler32 */ |
Byte data_type; /* UNKNOWN, BINARY or ASCII */ |
Byte method; /* STORED (for zip only) or DEFLATED */ |
int last_flush; /* value of flush param for previous deflate call */ |
/* used by deflate.c: */ |
uInt w_size; /* LZ77 window size (32K by default) */ |
uInt w_bits; /* log2(w_size) (8..16) */ |
uInt w_mask; /* w_size - 1 */ |
Bytef *window; |
/* Sliding window. Input bytes are read into the second half of the window, |
* and move to the first half later to keep a dictionary of at least wSize |
* bytes. With this organization, matches are limited to a distance of |
* wSize-MAX_MATCH bytes, but this ensures that IO is always |
* performed with a length multiple of the block size. Also, it limits |
* the window size to 64K, which is quite useful on MSDOS. |
* To do: use the user input buffer as sliding window. |
*/ |
ulg window_size; |
/* Actual size of window: 2*wSize, except when the user input buffer |
* is directly used as sliding window. |
*/ |
Posf *prev; |
/* Link to older string with same hash index. To limit the size of this |
* array to 64K, this link is maintained only for the last 32K strings. |
* An index in this array is thus a window index modulo 32K. |
*/ |
Posf *head; /* Heads of the hash chains or NIL. */ |
uInt ins_h; /* hash index of string to be inserted */ |
uInt hash_size; /* number of elements in hash table */ |
uInt hash_bits; /* log2(hash_size) */ |
uInt hash_mask; /* hash_size-1 */ |
uInt hash_shift; |
/* Number of bits by which ins_h must be shifted at each input |
* step. It must be such that after MIN_MATCH steps, the oldest |
* byte no longer takes part in the hash key, that is: |
* hash_shift * MIN_MATCH >= hash_bits |
*/ |
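/* Illustration (added comment): with the default hash_bits of 15 and |
 * MIN_MATCH of 3, deflateInit2_ computes hash_shift = (15+3-1)/3 = 5, |
 * so after three input steps all 15 bits of the hash key have been |
 * replaced, satisfying the condition above. |
 */ |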
long block_start; |
/* Window position at the beginning of the current output block. Gets |
* negative when the window is moved backwards. |
*/ |
uInt match_length; /* length of best match */ |
IPos prev_match; /* previous match */ |
int match_available; /* set if previous match exists */ |
uInt strstart; /* start of string to insert */ |
uInt match_start; /* start of matching string */ |
uInt lookahead; /* number of valid bytes ahead in window */ |
uInt prev_length; |
/* Length of the best match at previous step. Matches not greater than this |
* are discarded. This is used in the lazy match evaluation. |
*/ |
uInt max_chain_length; |
/* To speed up deflation, hash chains are never searched beyond this |
* length. A higher limit improves compression ratio but degrades the |
* speed. |
*/ |
uInt max_lazy_match; |
/* Attempt to find a better match only when the current match is strictly |
* smaller than this value. This mechanism is used only for compression |
* levels >= 4. |
*/ |
# define max_insert_length max_lazy_match |
/* Insert new strings in the hash table only if the match length is not |
* greater than this length. This saves time but degrades compression. |
* max_insert_length is used only for compression levels <= 3. |
*/ |
int level; /* compression level (1..9) */ |
int strategy; /* favor or force Huffman coding*/ |
uInt good_match; |
/* Use a faster search when the previous match is longer than this */ |
int nice_match; /* Stop searching when current match exceeds this */ |
/* used by trees.c: */ |
/* Didn't use ct_data typedef below to suppress compiler warning */ |
struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ |
struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ |
struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ |
struct tree_desc_s l_desc; /* desc. for literal tree */ |
struct tree_desc_s d_desc; /* desc. for distance tree */ |
struct tree_desc_s bl_desc; /* desc. for bit length tree */ |
ush bl_count[MAX_BITS+1]; |
/* number of codes at each bit length for an optimal tree */ |
int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ |
int heap_len; /* number of elements in the heap */ |
int heap_max; /* element of largest frequency */ |
/* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. |
* The same heap array is used to build all trees. |
*/ |
uch depth[2*L_CODES+1]; |
/* Depth of each subtree used as tie breaker for trees of equal frequency |
*/ |
uchf *l_buf; /* buffer for literals or lengths */ |
uInt lit_bufsize; |
/* Size of match buffer for literals/lengths. There are 4 reasons for |
* limiting lit_bufsize to 64K: |
* - frequencies can be kept in 16 bit counters |
* - if compression is not successful for the first block, all input |
* data is still in the window so we can still emit a stored block even |
* when input comes from standard input. (This can also be done for |
* all blocks if lit_bufsize is not greater than 32K.) |
* - if compression is not successful for a file smaller than 64K, we can |
* even emit a stored file instead of a stored block (saving 5 bytes). |
* This is applicable only for zip (not gzip or zlib). |
* - creating new Huffman trees less frequently may not provide fast |
* adaptation to changes in the input data statistics. (Take for |
* example a binary file with poorly compressible code followed by |
* a highly compressible string table.) Smaller buffer sizes give |
* fast adaptation but have of course the overhead of transmitting |
* trees more frequently. |
* - I can't count above 4 |
*/ |
uInt last_lit; /* running index in l_buf */ |
ushf *d_buf; |
/* Buffer for distances. To simplify the code, d_buf and l_buf have |
* the same number of elements. To use different lengths, an extra flag |
* array would be necessary. |
*/ |
ulg opt_len; /* bit length of current block with optimal trees */ |
ulg static_len; /* bit length of current block with static trees */ |
uInt matches; /* number of string matches in current block */ |
int last_eob_len; /* bit length of EOB code for last block */ |
#ifdef DEBUG |
ulg compressed_len; /* total bit length of compressed file mod 2^32 */ |
ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ |
#endif |
ush bi_buf; |
/* Output buffer. bits are inserted starting at the bottom (least |
* significant bits). |
*/ |
int bi_valid; |
/* Number of valid bits in bi_buf. All bits above the last valid bit |
* are always zero. |
*/ |
} FAR deflate_state; |
/* Output a byte on the stream. |
* IN assertion: there is enough room in pending_buf. |
*/ |
#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} |
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) |
/* Minimum amount of lookahead, except at the end of the input file. |
* See deflate.c for comments about the MIN_MATCH+1. |
*/ |
#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) |
/* In order to simplify the code, particularly on 16 bit machines, match |
* distances are limited to MAX_DIST instead of WSIZE. |
*/ |
/* in trees.c */ |
void _tr_init OF((deflate_state *s)); |
int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); |
void _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, |
int eof)); |
void _tr_align OF((deflate_state *s)); |
void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, |
int eof)); |
#define d_code(dist) \ |
((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) |
/* Mapping from a distance to a distance code. dist is the distance - 1 and |
* must not have side effects. _dist_code[256] and _dist_code[257] are never |
* used. |
*/ |
#ifndef DEBUG |
/* Inline versions of _tr_tally for speed: */ |
#if defined(GEN_TREES_H) || !defined(STDC) |
extern uch _length_code[]; |
extern uch _dist_code[]; |
#else |
extern const uch _length_code[]; |
extern const uch _dist_code[]; |
#endif |
# define _tr_tally_lit(s, c, flush) \ |
{ uch cc = (c); \ |
s->d_buf[s->last_lit] = 0; \ |
s->l_buf[s->last_lit++] = cc; \ |
s->dyn_ltree[cc].Freq++; \ |
flush = (s->last_lit == s->lit_bufsize-1); \ |
} |
# define _tr_tally_dist(s, distance, length, flush) \ |
{ uch len = (length); \ |
ush dist = (distance); \ |
s->d_buf[s->last_lit] = dist; \ |
s->l_buf[s->last_lit++] = len; \ |
dist--; \ |
s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ |
s->dyn_dtree[d_code(dist)].Freq++; \ |
flush = (s->last_lit == s->lit_bufsize-1); \ |
} |
#else |
# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) |
# define _tr_tally_dist(s, distance, length, flush) \ |
flush = _tr_tally(s, distance, length) |
#endif |
#endif |
/shark/trunk/ports/png/infutil.c |
---|
0,0 → 1,87 |
/* inflate_util.c -- data and routines common to blocks and codes |
* Copyright (C) 1995-2002 Mark Adler |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
#include "zutil.h" |
#include "infblock.h" |
#include "inftrees.h" |
#include "infcodes.h" |
#include "infutil.h" |
struct inflate_codes_state {int dummy;}; /* for buggy compilers */ |
/* And'ing with mask[n] masks the lower n bits */ |
uInt inflate_mask[17] = { |
0x0000, |
0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, |
0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff |
}; |
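/* Example (added comment): inflate_mask[3] == 0x0007, so an expression |
 * such as (uInt)b & inflate_mask[3] keeps exactly the 3 low bits that |
 * NEEDBITS(3) guaranteed to be present in the bit buffer. |
 */ |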
/* copy as much as possible from the sliding window to the output area */ |
int inflate_flush(s, z, r) |
inflate_blocks_statef *s; |
z_streamp z; |
int r; |
{ |
uInt n; |
Bytef *p; |
Bytef *q; |
/* local copies of source and destination pointers */ |
p = z->next_out; |
q = s->read; |
/* compute number of bytes to copy as far as end of window */ |
n = (uInt)((q <= s->write ? s->write : s->end) - q); |
if (n > z->avail_out) n = z->avail_out; |
if (n && r == Z_BUF_ERROR) r = Z_OK; |
/* update counters */ |
z->avail_out -= n; |
z->total_out += n; |
/* update check information */ |
if (s->checkfn != Z_NULL) |
z->adler = s->check = (*s->checkfn)(s->check, q, n); |
/* copy as far as end of window */ |
zmemcpy(p, q, n); |
p += n; |
q += n; |
/* see if more to copy at beginning of window */ |
if (q == s->end) |
{ |
/* wrap pointers */ |
q = s->window; |
if (s->write == s->end) |
s->write = s->window; |
/* compute bytes to copy */ |
n = (uInt)(s->write - q); |
if (n > z->avail_out) n = z->avail_out; |
if (n && r == Z_BUF_ERROR) r = Z_OK; |
/* update counters */ |
z->avail_out -= n; |
z->total_out += n; |
/* update check information */ |
if (s->checkfn != Z_NULL) |
z->adler = s->check = (*s->checkfn)(s->check, q, n); |
/* copy */ |
zmemcpy(p, q, n); |
p += n; |
q += n; |
} |
/* update pointers */ |
z->next_out = p; |
s->read = q; |
/* done */ |
return r; |
} |
/shark/trunk/ports/png/zutil.h |
---|
0,0 → 1,220 |
/* zutil.h -- internal interface and configuration of the compression library |
* Copyright (C) 1995-2002 Jean-loup Gailly. |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* WARNING: this file should *not* be used by applications. It is |
part of the implementation of the compression library and is |
subject to change. Applications should only use zlib.h. |
*/ |
/* @(#) $Id: zutil.h,v 1.1 2003-03-20 13:08:13 giacomo Exp $ */ |
#ifndef _Z_UTIL_H |
#define _Z_UTIL_H |
#include "zlib.h" |
#ifdef STDC |
# include <stddef.h> |
# include <string.h> |
# include <stdlib.h> |
#endif |
#ifdef NO_ERRNO_H |
extern int errno; |
#else |
# include <errno.h> |
#endif |
#ifndef local |
# define local static |
#endif |
/* compile with -Dlocal if your debugger can't find static symbols */ |
typedef unsigned char uch; |
typedef uch FAR uchf; |
typedef unsigned short ush; |
typedef ush FAR ushf; |
typedef unsigned long ulg; |
extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */ |
/* (size given to avoid silly warnings with Visual C++) */ |
#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] |
#define ERR_RETURN(strm,err) \ |
return (strm->msg = (char*)ERR_MSG(err), (err)) |
/* To be used only when the state is known to be valid */ |
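/* Example of the indexing above (added comment): with Z_NEED_DICT == 2 |
 * and Z_DATA_ERROR == (-3), ERR_MSG(Z_DATA_ERROR) expands to |
 * z_errmsg[2 - (-3)] == z_errmsg[5], i.e. the "data error" string |
 * defined in zutil.c. |
 */ |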
/* common constants */ |
#ifndef DEF_WBITS |
# define DEF_WBITS MAX_WBITS |
#endif |
/* default windowBits for decompression. MAX_WBITS is for compression only */ |
#if MAX_MEM_LEVEL >= 8 |
# define DEF_MEM_LEVEL 8 |
#else |
# define DEF_MEM_LEVEL MAX_MEM_LEVEL |
#endif |
/* default memLevel */ |
#define STORED_BLOCK 0 |
#define STATIC_TREES 1 |
#define DYN_TREES 2 |
/* The three kinds of block type */ |
#define MIN_MATCH 3 |
#define MAX_MATCH 258 |
/* The minimum and maximum match lengths */ |
#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ |
/* target dependencies */ |
#ifdef MSDOS |
# define OS_CODE 0x00 |
# if defined(__TURBOC__) || defined(__BORLANDC__) |
# if(__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) |
/* Allow compilation with ANSI keywords only enabled */ |
void _Cdecl farfree( void *block ); |
void *_Cdecl farmalloc( unsigned long nbytes ); |
# else |
# include <alloc.h> |
# endif |
# else /* MSC or DJGPP */ |
# include <malloc.h> |
# endif |
#endif |
#ifdef OS2 |
# define OS_CODE 0x06 |
#endif |
#ifdef WIN32 /* Windows 95 & Windows NT */ |
# define OS_CODE 0x0b |
#endif |
#if defined(VAXC) || defined(VMS) |
# define OS_CODE 0x02 |
# define F_OPEN(name, mode) \ |
fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") |
#endif |
#ifdef AMIGA |
# define OS_CODE 0x01 |
#endif |
#if defined(ATARI) || defined(atarist) |
# define OS_CODE 0x05 |
#endif |
#if defined(MACOS) || defined(TARGET_OS_MAC) |
# define OS_CODE 0x07 |
# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os |
# include <unix.h> /* for fdopen */ |
# else |
# ifndef fdopen |
# define fdopen(fd,mode) NULL /* No fdopen() */ |
# endif |
# endif |
#endif |
#ifdef __50SERIES /* Prime/PRIMOS */ |
# define OS_CODE 0x0F |
#endif |
#ifdef TOPS20 |
# define OS_CODE 0x0a |
#endif |
#if defined(_BEOS_) || defined(RISCOS) |
# define fdopen(fd,mode) NULL /* No fdopen() */ |
#endif |
#if (defined(_MSC_VER) && (_MSC_VER > 600)) |
# define fdopen(fd,type) _fdopen(fd,type) |
#endif |
/* Common defaults */ |
#ifndef OS_CODE |
# define OS_CODE 0x03 /* assume Unix */ |
#endif |
#ifndef F_OPEN |
# define F_OPEN(name, mode) fopen((name), (mode)) |
#endif |
/* functions */ |
#ifdef HAVE_STRERROR |
extern char *strerror OF((int)); |
# define zstrerror(errnum) strerror(errnum) |
#else |
# define zstrerror(errnum) "" |
#endif |
#if defined(pyr) |
# define NO_MEMCPY |
#endif |
#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) |
/* Use our own functions for small and medium model with MSC <= 5.0. |
* You may have to use the same strategy for Borland C (untested). |
* The __SC__ check is for Symantec. |
*/ |
# define NO_MEMCPY |
#endif |
#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) |
# define HAVE_MEMCPY |
#endif |
#ifdef HAVE_MEMCPY |
# ifdef SMALL_MEDIUM /* MSDOS small or medium model */ |
# define zmemcpy _fmemcpy |
# define zmemcmp _fmemcmp |
# define zmemzero(dest, len) _fmemset(dest, 0, len) |
# else |
# define zmemcpy memcpy |
# define zmemcmp memcmp |
# define zmemzero(dest, len) memset(dest, 0, len) |
# endif |
#else |
extern void zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); |
extern int zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); |
extern void zmemzero OF((Bytef* dest, uInt len)); |
#endif |
/* Diagnostic functions */ |
#ifdef DEBUG |
# include <stdio.h> |
extern int z_verbose; |
extern void z_error OF((char *m)); |
# define Assert(cond,msg) {if(!(cond)) z_error(msg);} |
# define Trace(x) {if (z_verbose>=0) cprintf x ;} |
# define Tracev(x) {if (z_verbose>0) cprintf x ;} |
# define Tracevv(x) {if (z_verbose>1) cprintf x ;} |
# define Tracec(c,x) {if (z_verbose>0 && (c)) cprintf x ;} |
# define Tracecv(c,x) {if (z_verbose>1 && (c)) cprintf x ;} |
#else |
# define Assert(cond,msg) |
# define Trace(x) |
# define Tracev(x) |
# define Tracevv(x) |
# define Tracec(c,x) |
# define Tracecv(c,x) |
#endif |
typedef uLong (ZEXPORT *check_func) OF((uLong check, const Bytef *buf, |
uInt len)); |
voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); |
void zcfree OF((voidpf opaque, voidpf ptr)); |
#define ZALLOC(strm, items, size) \ |
(*((strm)->zalloc))((strm)->opaque, (items), (size)) |
#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) |
#define TRY_FREE(s, p) {if (p) ZFREE(s, p);} |
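/* Note (added comment): when the application leaves zalloc/zfree as |
 * Z_NULL, deflateInit2_/inflateInit2_ point them at the zcalloc/zcfree |
 * defined in zutil.c, so ZALLOC above normally ends up in calloc() on |
 * targets that do not define MY_ZCALLOC. |
 */ |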
#endif /* _Z_UTIL_H */ |
/shark/trunk/ports/png/crc32.c |
---|
0,0 → 1,162 |
/* crc32.c -- compute the CRC-32 of a data stream |
* Copyright (C) 1995-2002 Mark Adler |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* @(#) $Id: crc32.c,v 1.1 2003-03-20 13:08:10 giacomo Exp $ */ |
#include "zlib.h" |
#define local static |
#ifdef DYNAMIC_CRC_TABLE |
local int crc_table_empty = 1; |
local uLongf crc_table[256]; |
local void make_crc_table OF((void)); |
/* |
Generate a table for a byte-wise 32-bit CRC calculation on the polynomial: |
x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. |
Polynomials over GF(2) are represented in binary, one bit per coefficient, |
with the lowest powers in the most significant bit. Then adding polynomials |
is just exclusive-or, and multiplying a polynomial by x is a right shift by |
one. If we call the above polynomial p, and represent a byte as the |
polynomial q, also with the lowest power in the most significant bit (so the |
byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, |
where a mod b means the remainder after dividing a by b. |
This calculation is done using the shift-register method of multiplying and |
taking the remainder. The register is initialized to zero, and for each |
incoming bit, x^32 is added mod p to the register if the bit is a one (where |
x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by |
x (which is shifting right by one and adding x^32 mod p if the bit shifted |
out is a one). We start with the highest power (least significant bit) of |
q and repeat for all eight bits of q. |
The table is simply the CRC of all possible eight bit values. This is all |
the information needed to generate CRC's on data a byte at a time for all |
combinations of CRC register values and incoming bytes. |
*/ |
local void make_crc_table() |
{ |
uLong c; |
int n, k; |
uLong poly; /* polynomial exclusive-or pattern */ |
/* terms of polynomial defining this crc (except x^32): */ |
static const Byte p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; |
/* make exclusive-or pattern from polynomial (0xedb88320L) */ |
poly = 0L; |
for (n = 0; n < sizeof(p)/sizeof(Byte); n++) |
poly |= 1L << (31 - p[n]); |
for (n = 0; n < 256; n++) |
{ |
c = (uLong)n; |
for (k = 0; k < 8; k++) |
c = c & 1 ? poly ^ (c >> 1) : c >> 1; |
crc_table[n] = c; |
} |
crc_table_empty = 0; |
} |
#else |
/* ======================================================================== |
* Table of CRC-32's of all single-byte values (made by make_crc_table) |
*/ |
local const uLongf crc_table[256] = { |
0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L, |
0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L, |
0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L, |
0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, |
0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, |
0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, |
0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L, |
0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL, |
0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L, |
0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, |
0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, |
0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L, |
0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L, |
0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL, |
0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL, |
0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L, |
0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, |
0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, |
0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L, |
0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L, |
0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL, |
0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, |
0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, |
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL, |
0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L, |
0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L, |
0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L, |
0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L, |
0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L, |
0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL, |
0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL, |
0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L, |
0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L, |
0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, |
0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, |
0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L, |
0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL, |
0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L, |
0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL, |
0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L, |
0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL, |
0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, |
0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L, |
0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL, |
0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L, |
0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, |
0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, |
0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, |
0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L, |
0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L, |
0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL, |
0x2d02ef8dL |
}; |
#endif |
/* ========================================================================= |
* This function can be used by asm versions of crc32() |
*/ |
const uLongf * ZEXPORT get_crc_table() |
{ |
#ifdef DYNAMIC_CRC_TABLE |
if (crc_table_empty) make_crc_table(); |
#endif |
return (const uLongf *)crc_table; |
} |
/* ========================================================================= */ |
#define DO1(buf) crc = crc_table[((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8); |
#define DO2(buf) DO1(buf); DO1(buf); |
#define DO4(buf) DO2(buf); DO2(buf); |
#define DO8(buf) DO4(buf); DO4(buf); |
/* ========================================================================= */ |
uLong ZEXPORT crc32(crc, buf, len) |
uLong crc; |
const Bytef *buf; |
uInt len; |
{ |
if (buf == Z_NULL) return 0L; |
#ifdef DYNAMIC_CRC_TABLE |
if (crc_table_empty) |
make_crc_table(); |
#endif |
crc = crc ^ 0xffffffffL; |
while (len >= 8) |
{ |
DO8(buf); |
len -= 8; |
} |
if (len) do { |
DO1(buf); |
} while (--len); |
return crc ^ 0xffffffffL; |
} |
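/* Illustrative sketch (not part of the original file): crc32() is meant |
 * to be used incrementally, starting from crc32(0L, Z_NULL, 0) and then |
 * feeding each buffer in turn, exactly as gzio.c does around gzread and |
 * gzwrite.  Disabled with #if 0 so it does not affect the build. |
 */ |
#if 0 |
static uLong example_crc_of_two_chunks(const Bytef *a, uInt alen, |
const Bytef *b, uInt blen) |
{ |
uLong crc = crc32(0L, Z_NULL, 0); /* initial value */ |
crc = crc32(crc, a, alen); |
crc = crc32(crc, b, blen); /* same result as one combined buffer */ |
return crc; |
} |
#endif |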
/shark/trunk/ports/png/infutil.h |
---|
0,0 → 1,98 |
/* infutil.h -- types and macros common to blocks and codes |
* Copyright (C) 1995-2002 Mark Adler |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* WARNING: this file should *not* be used by applications. It is |
part of the implementation of the compression library and is |
subject to change. Applications should only use zlib.h. |
*/ |
#ifndef _INFUTIL_H |
#define _INFUTIL_H |
typedef enum { |
TYPE, /* get type bits (3, including end bit) */ |
LENS, /* get lengths for stored */ |
STORED, /* processing stored block */ |
TABLE, /* get table lengths */ |
BTREE, /* get bit lengths tree for a dynamic block */ |
DTREE, /* get length, distance trees for a dynamic block */ |
CODES, /* processing fixed or dynamic block */ |
DRY, /* output remaining window bytes */ |
DONE, /* finished last block, done */ |
BAD} /* got a data error--stuck here */ |
inflate_block_mode; |
/* inflate blocks semi-private state */ |
struct inflate_blocks_state { |
/* mode */ |
inflate_block_mode mode; /* current inflate_block mode */ |
/* mode dependent information */ |
union { |
uInt left; /* if STORED, bytes left to copy */ |
struct { |
uInt table; /* table lengths (14 bits) */ |
uInt index; /* index into blens (or border) */ |
uIntf *blens; /* bit lengths of codes */ |
uInt bb; /* bit length tree depth */ |
inflate_huft *tb; /* bit length decoding tree */ |
} trees; /* if DTREE, decoding info for trees */ |
struct { |
inflate_codes_statef |
*codes; |
} decode; /* if CODES, current state */ |
} sub; /* submode */ |
uInt last; /* true if this block is the last block */ |
/* mode independent information */ |
uInt bitk; /* bits in bit buffer */ |
uLong bitb; /* bit buffer */ |
inflate_huft *hufts; /* single malloc for tree space */ |
Bytef *window; /* sliding window */ |
Bytef *end; /* one byte after sliding window */ |
Bytef *read; /* window read pointer */ |
Bytef *write; /* window write pointer */ |
check_func checkfn; /* check function */ |
uLong check; /* check on output */ |
}; |
/* defines for inflate input/output */ |
/* update pointers and return */ |
#define UPDBITS {s->bitb=b;s->bitk=k;} |
#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;} |
#define UPDOUT {s->write=q;} |
#define UPDATE {UPDBITS UPDIN UPDOUT} |
#define LEAVE {UPDATE return inflate_flush(s,z,r);} |
/* get bytes and bits */ |
#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;} |
#define NEEDBYTE {if(n)r=Z_OK;else LEAVE} |
#define NEXTBYTE (n--,*p++) |
#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}} |
#define DUMPBITS(j) {b>>=(j);k-=(j);} |
/* output bytes */ |
#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q) |
#define LOADOUT {q=s->write;m=(uInt)WAVAIL;} |
#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}} |
#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT} |
#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;} |
#define OUTBYTE(a) {*q++=(Byte)(a);m--;} |
/* load local pointers */ |
#define LOAD {LOADIN LOADOUT} |
/* masks for lower bits (size given to avoid silly warnings with Visual C++) */ |
extern uInt inflate_mask[17]; |
/* copy as much as possible from the sliding window to the output area */ |
extern int inflate_flush OF(( |
inflate_blocks_statef *, |
z_streamp , |
int)); |
struct internal_state {int dummy;}; /* for buggy compilers */ |
#endif |
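/* The NEEDBITS()/DUMPBITS() macros above implement an LSB-first bit |
 * accumulator over the input stream (b holds the bits, k counts them). |
 * The standalone sketch below reproduces the same idea outside the inflate |
 * state, purely as an illustration; it is not part of zlib. |
 */ |
#if 0 /* illustrative only -- not compiled */ |
/* Return the next 'want' bits (LSB first) from buf, updating *pos, *bitbuf |
 * and *bits, which play the roles of p, b and k in the macros above. */ |
static unsigned get_bits(const unsigned char *buf, unsigned long *pos, |
                         unsigned long *bitbuf, unsigned *bits, unsigned want) |
{ |
    unsigned val; |
    while (*bits < want) {                        /* NEEDBITS: pull in bytes */ |
        *bitbuf |= (unsigned long)buf[(*pos)++] << *bits; |
        *bits += 8; |
    } |
    val = (unsigned)(*bitbuf & ((1UL << want) - 1));  /* low 'want' bits */ |
    *bitbuf >>= want;                             /* DUMPBITS: discard them */ |
    *bits -= want; |
    return val; |
} |
#endif |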
/shark/trunk/ports/png/pngread.c |
---|
0,0 → 1,1424 |
/* pngread.c - read a PNG file |
* |
* libpng 1.2.5 - October 3, 2002 |
* For conditions of distribution and use, see copyright notice in png.h |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) |
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) |
* |
* This file contains routines that an application calls directly to |
* read a PNG file or stream. |
*/ |
#define PNG_INTERNAL |
#include "png.h" |
/* Create a PNG structure for reading, and allocate any memory needed. */ |
png_structp PNGAPI |
png_create_read_struct(png_const_charp user_png_ver, png_voidp error_ptr, |
png_error_ptr error_fn, png_error_ptr warn_fn) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
return (png_create_read_struct_2(user_png_ver, error_ptr, error_fn, |
warn_fn, png_voidp_NULL, png_malloc_ptr_NULL, png_free_ptr_NULL)); |
} |
/* Alternate create PNG structure for reading, and allocate any memory needed. */ |
png_structp PNGAPI |
png_create_read_struct_2(png_const_charp user_png_ver, png_voidp error_ptr, |
png_error_ptr error_fn, png_error_ptr warn_fn, png_voidp mem_ptr, |
png_malloc_ptr malloc_fn, png_free_ptr free_fn) |
{ |
#endif /* PNG_USER_MEM_SUPPORTED */ |
png_structp png_ptr; |
#ifdef PNG_SETJMP_SUPPORTED |
#ifdef USE_FAR_KEYWORD |
jmp_buf jmpbuf; |
#endif |
#endif |
int i; |
png_debug(1, "in png_create_read_struct\n"); |
#ifdef PNG_USER_MEM_SUPPORTED |
png_ptr = (png_structp)png_create_struct_2(PNG_STRUCT_PNG, |
(png_malloc_ptr)malloc_fn, (png_voidp)mem_ptr); |
#else |
png_ptr = (png_structp)png_create_struct(PNG_STRUCT_PNG); |
#endif |
if (png_ptr == NULL) |
return (NULL); |
#if !defined(PNG_1_0_X) |
#ifdef PNG_ASSEMBLER_CODE_SUPPORTED |
png_init_mmx_flags(png_ptr); /* 1.2.0 addition */ |
#endif |
#endif /* PNG_1_0_X */ |
#ifdef PNG_SETJMP_SUPPORTED |
#ifdef USE_FAR_KEYWORD |
if (setjmp(jmpbuf)) |
#else |
if (setjmp(png_ptr->jmpbuf)) |
#endif |
{ |
png_free(png_ptr, png_ptr->zbuf); |
png_ptr->zbuf=NULL; |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2((png_voidp)png_ptr, |
(png_free_ptr)free_fn, (png_voidp)mem_ptr); |
#else |
png_destroy_struct((png_voidp)png_ptr); |
#endif |
return (NULL); |
} |
#ifdef USE_FAR_KEYWORD |
png_memcpy(png_ptr->jmpbuf,jmpbuf,sizeof(jmp_buf)); |
#endif |
#endif |
#ifdef PNG_USER_MEM_SUPPORTED |
png_set_mem_fn(png_ptr, mem_ptr, malloc_fn, free_fn); |
#endif |
png_set_error_fn(png_ptr, error_ptr, error_fn, warn_fn); |
i=0; |
do |
{ |
if(user_png_ver[i] != png_libpng_ver[i]) |
png_ptr->flags |= PNG_FLAG_LIBRARY_MISMATCH; |
} while (png_libpng_ver[i++]); |
if (png_ptr->flags & PNG_FLAG_LIBRARY_MISMATCH) |
{ |
/* Libpng 0.90 and later are binary incompatible with libpng 0.89, so |
* we must recompile any applications that use any older library version. |
* For versions after libpng 1.0, we will be compatible, so we need |
* only check the first digit. |
*/ |
if (user_png_ver == NULL || user_png_ver[0] != png_libpng_ver[0] || |
(user_png_ver[0] == '1' && user_png_ver[2] != png_libpng_ver[2]) || |
(user_png_ver[0] == '0' && user_png_ver[2] < '9')) |
{ |
#if !defined(PNG_NO_STDIO) && !defined(_WIN32_WCE) |
char msg[80]; |
if (user_png_ver) |
{ |
sprintf(msg, "Application was compiled with png.h from libpng-%.20s", |
user_png_ver); |
png_warning(png_ptr, msg); |
} |
sprintf(msg, "Application is running with png.c from libpng-%.20s", |
png_libpng_ver); |
png_warning(png_ptr, msg); |
#endif |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
png_ptr->flags=0; |
#endif |
png_error(png_ptr, |
"Incompatible libpng version in application and library"); |
} |
} |
/* initialize zbuf - compression buffer */ |
png_ptr->zbuf_size = PNG_ZBUF_SIZE; |
png_ptr->zbuf = (png_bytep)png_malloc(png_ptr, |
(png_uint_32)png_ptr->zbuf_size); |
png_ptr->zstream.zalloc = png_zalloc; |
png_ptr->zstream.zfree = png_zfree; |
png_ptr->zstream.opaque = (voidpf)png_ptr; |
switch (inflateInit(&png_ptr->zstream)) |
{ |
case Z_OK: /* Do nothing */ break; |
case Z_MEM_ERROR: |
case Z_STREAM_ERROR: png_error(png_ptr, "zlib memory error"); break; |
case Z_VERSION_ERROR: png_error(png_ptr, "zlib version error"); break; |
default: png_error(png_ptr, "Unknown zlib error"); |
} |
png_ptr->zstream.next_out = png_ptr->zbuf; |
png_ptr->zstream.avail_out = (uInt)png_ptr->zbuf_size; |
png_set_read_fn(png_ptr, png_voidp_NULL, png_rw_ptr_NULL); |
#ifdef PNG_SETJMP_SUPPORTED |
/* Applications that neglect to set up their own setjmp() and then encounter |
a png_error() will longjmp here. Since the jmpbuf is then meaningless we |
abort instead of returning. */ |
#ifdef USE_FAR_KEYWORD |
if (setjmp(jmpbuf)) |
PNG_ABORT(); |
png_memcpy(png_ptr->jmpbuf,jmpbuf,sizeof(jmp_buf)); |
#else |
if (setjmp(png_ptr->jmpbuf)) |
PNG_ABORT(); |
#endif |
#endif |
return (png_ptr); |
} |
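/* A hypothetical caller-side sketch of the setup sequence this function takes |
 * part in (application code, not part of libpng). setjmp() installs the |
 * error-return point that png_error() longjmp()s back to; on failure every |
 * structure allocated so far is released again. |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static int open_png_reader(FILE *fp, png_structpp pp, png_infopp ip) |
{ |
    *pp = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); |
    if (*pp == NULL) |
        return -1; |
    *ip = png_create_info_struct(*pp); |
    if (*ip == NULL || setjmp(png_jmpbuf(*pp))) |
    {   /* error path: free whatever was created */ |
        png_destroy_read_struct(pp, ip, (png_infopp)NULL); |
        return -1; |
    } |
    png_init_io(*pp, fp);   /* read from a stdio stream */ |
    return 0; |
} |
#endif |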
/* Initialize PNG structure for reading, and allocate any memory needed. |
This interface is deprecated in favour of png_create_read_struct(), |
and it will eventually disappear. */ |
#undef png_read_init |
void PNGAPI |
png_read_init(png_structp png_ptr) |
{ |
/* We only come here via pre-1.0.7-compiled applications */ |
png_read_init_2(png_ptr, "1.0.6 or earlier", 0, 0); |
} |
void PNGAPI |
png_read_init_2(png_structp png_ptr, png_const_charp user_png_ver, |
png_size_t png_struct_size, png_size_t png_info_size) |
{ |
/* We only come here via pre-1.0.12-compiled applications */ |
#if !defined(PNG_NO_STDIO) && !defined(_WIN32_WCE) |
if(sizeof(png_struct) > png_struct_size || sizeof(png_info) > png_info_size) |
{ |
char msg[80]; |
png_ptr->warning_fn=NULL; |
if (user_png_ver) |
{ |
sprintf(msg, "Application was compiled with png.h from libpng-%.20s", |
user_png_ver); |
png_warning(png_ptr, msg); |
} |
sprintf(msg, "Application is running with png.c from libpng-%.20s", |
png_libpng_ver); |
png_warning(png_ptr, msg); |
} |
#endif |
if(sizeof(png_struct) > png_struct_size) |
{ |
png_ptr->error_fn=NULL; |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
png_ptr->flags=0; |
#endif |
png_error(png_ptr, |
"The png struct allocated by the application for reading is too small."); |
} |
if(sizeof(png_info) > png_info_size) |
{ |
png_ptr->error_fn=NULL; |
#ifdef PNG_ERROR_NUMBERS_SUPPORTED |
png_ptr->flags=0; |
#endif |
png_error(png_ptr, |
"The info struct allocated by application for reading is too small."); |
} |
png_read_init_3(&png_ptr, user_png_ver, png_struct_size); |
} |
void PNGAPI |
png_read_init_3(png_structpp ptr_ptr, png_const_charp user_png_ver, |
png_size_t png_struct_size) |
{ |
#ifdef PNG_SETJMP_SUPPORTED |
jmp_buf tmp_jmp; /* to save current jump buffer */ |
#endif |
int i=0; |
png_structp png_ptr=*ptr_ptr; |
do |
{ |
if(user_png_ver[i] != png_libpng_ver[i]) |
{ |
#ifdef PNG_LEGACY_SUPPORTED |
png_ptr->flags |= PNG_FLAG_LIBRARY_MISMATCH; |
#else |
png_ptr->warning_fn=NULL; |
png_warning(png_ptr, |
"Application uses deprecated png_read_init() and should be recompiled."); |
break; |
#endif |
} |
} while (png_libpng_ver[i++]); |
png_debug(1, "in png_read_init_3\n"); |
#ifdef PNG_SETJMP_SUPPORTED |
/* save jump buffer and error functions */ |
png_memcpy(tmp_jmp, png_ptr->jmpbuf, sizeof (jmp_buf)); |
#endif |
if(sizeof(png_struct) > png_struct_size) |
{ |
png_destroy_struct(png_ptr); |
*ptr_ptr = (png_structp)png_create_struct(PNG_STRUCT_PNG); |
png_ptr = *ptr_ptr; |
} |
/* reset all variables to 0 */ |
png_memset(png_ptr, 0, sizeof (png_struct)); |
#ifdef PNG_SETJMP_SUPPORTED |
/* restore jump buffer */ |
png_memcpy(png_ptr->jmpbuf, tmp_jmp, sizeof (jmp_buf)); |
#endif |
/* initialize zbuf - compression buffer */ |
png_ptr->zbuf_size = PNG_ZBUF_SIZE; |
png_ptr->zbuf = (png_bytep)png_malloc(png_ptr, |
(png_uint_32)png_ptr->zbuf_size); |
png_ptr->zstream.zalloc = png_zalloc; |
png_ptr->zstream.zfree = png_zfree; |
png_ptr->zstream.opaque = (voidpf)png_ptr; |
switch (inflateInit(&png_ptr->zstream)) |
{ |
case Z_OK: /* Do nothing */ break; |
case Z_MEM_ERROR: |
case Z_STREAM_ERROR: png_error(png_ptr, "zlib memory"); break; |
case Z_VERSION_ERROR: png_error(png_ptr, "zlib version"); break; |
default: png_error(png_ptr, "Unknown zlib error"); |
} |
png_ptr->zstream.next_out = png_ptr->zbuf; |
png_ptr->zstream.avail_out = (uInt)png_ptr->zbuf_size; |
png_set_read_fn(png_ptr, png_voidp_NULL, png_rw_ptr_NULL); |
} |
/* Read the information before the actual image data. This has been |
* changed in v0.90 to allow reading a file that already has the magic |
* bytes read from the stream. You can tell libpng how many bytes have |
* been read from the beginning of the stream (up to the maximum of 8) |
* via png_set_sig_bytes(), and we will only check the remaining bytes |
* here. The application can then have access to the signature bytes we |
* read if it is determined that this isn't a valid PNG file. |
*/ |
void PNGAPI |
png_read_info(png_structp png_ptr, png_infop info_ptr) |
{ |
png_debug(1, "in png_read_info\n"); |
/* If we haven't checked all of the PNG signature bytes, do so now. */ |
if (png_ptr->sig_bytes < 8) |
{ |
png_size_t num_checked = png_ptr->sig_bytes, |
num_to_check = 8 - num_checked; |
png_read_data(png_ptr, &(info_ptr->signature[num_checked]), num_to_check); |
png_ptr->sig_bytes = 8; |
if (png_sig_cmp(info_ptr->signature, num_checked, num_to_check)) |
{ |
if (num_checked < 4 && |
png_sig_cmp(info_ptr->signature, num_checked, num_to_check - 4)) |
png_error(png_ptr, "Not a PNG file"); |
else |
png_error(png_ptr, "PNG file corrupted by ASCII conversion"); |
} |
if (num_checked < 3) |
png_ptr->mode |= PNG_HAVE_PNG_SIGNATURE; |
} |
for(;;) |
{ |
#ifdef PNG_USE_LOCAL_ARRAYS |
PNG_IHDR; |
PNG_IDAT; |
PNG_IEND; |
PNG_PLTE; |
#if defined(PNG_READ_bKGD_SUPPORTED) |
PNG_bKGD; |
#endif |
#if defined(PNG_READ_cHRM_SUPPORTED) |
PNG_cHRM; |
#endif |
#if defined(PNG_READ_gAMA_SUPPORTED) |
PNG_gAMA; |
#endif |
#if defined(PNG_READ_hIST_SUPPORTED) |
PNG_hIST; |
#endif |
#if defined(PNG_READ_iCCP_SUPPORTED) |
PNG_iCCP; |
#endif |
#if defined(PNG_READ_iTXt_SUPPORTED) |
PNG_iTXt; |
#endif |
#if defined(PNG_READ_oFFs_SUPPORTED) |
PNG_oFFs; |
#endif |
#if defined(PNG_READ_pCAL_SUPPORTED) |
PNG_pCAL; |
#endif |
#if defined(PNG_READ_pHYs_SUPPORTED) |
PNG_pHYs; |
#endif |
#if defined(PNG_READ_sBIT_SUPPORTED) |
PNG_sBIT; |
#endif |
#if defined(PNG_READ_sCAL_SUPPORTED) |
PNG_sCAL; |
#endif |
#if defined(PNG_READ_sPLT_SUPPORTED) |
PNG_sPLT; |
#endif |
#if defined(PNG_READ_sRGB_SUPPORTED) |
PNG_sRGB; |
#endif |
#if defined(PNG_READ_tEXt_SUPPORTED) |
PNG_tEXt; |
#endif |
#if defined(PNG_READ_tIME_SUPPORTED) |
PNG_tIME; |
#endif |
#if defined(PNG_READ_tRNS_SUPPORTED) |
PNG_tRNS; |
#endif |
#if defined(PNG_READ_zTXt_SUPPORTED) |
PNG_zTXt; |
#endif |
#endif /* PNG_USE_LOCAL_ARRAYS */ |
png_byte chunk_length[4]; |
png_uint_32 length; |
png_read_data(png_ptr, chunk_length, 4); |
length = png_get_uint_32(chunk_length); |
png_reset_crc(png_ptr); |
png_crc_read(png_ptr, png_ptr->chunk_name, 4); |
png_debug2(0, "Reading %s chunk, length=%lu.\n", png_ptr->chunk_name, |
length); |
if (length > PNG_MAX_UINT) |
png_error(png_ptr, "Invalid chunk length."); |
/* This should be a binary subdivision search or a hash for |
* matching the chunk name rather than a linear search. |
*/ |
if (!png_memcmp(png_ptr->chunk_name, png_IHDR, 4)) |
png_handle_IHDR(png_ptr, info_ptr, length); |
else if (!png_memcmp(png_ptr->chunk_name, png_IEND, 4)) |
png_handle_IEND(png_ptr, info_ptr, length); |
#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED |
else if (png_handle_as_unknown(png_ptr, png_ptr->chunk_name)) |
{ |
if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) |
png_ptr->mode |= PNG_HAVE_IDAT; |
png_handle_unknown(png_ptr, info_ptr, length); |
if (!png_memcmp(png_ptr->chunk_name, png_PLTE, 4)) |
png_ptr->mode |= PNG_HAVE_PLTE; |
else if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) |
{ |
if (!(png_ptr->mode & PNG_HAVE_IHDR)) |
png_error(png_ptr, "Missing IHDR before IDAT"); |
else if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE && |
!(png_ptr->mode & PNG_HAVE_PLTE)) |
png_error(png_ptr, "Missing PLTE before IDAT"); |
break; |
} |
} |
#endif |
else if (!png_memcmp(png_ptr->chunk_name, png_PLTE, 4)) |
png_handle_PLTE(png_ptr, info_ptr, length); |
else if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) |
{ |
if (!(png_ptr->mode & PNG_HAVE_IHDR)) |
png_error(png_ptr, "Missing IHDR before IDAT"); |
else if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE && |
!(png_ptr->mode & PNG_HAVE_PLTE)) |
png_error(png_ptr, "Missing PLTE before IDAT"); |
png_ptr->idat_size = length; |
png_ptr->mode |= PNG_HAVE_IDAT; |
break; |
} |
#if defined(PNG_READ_bKGD_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_bKGD, 4)) |
png_handle_bKGD(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_cHRM_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_cHRM, 4)) |
png_handle_cHRM(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_gAMA_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_gAMA, 4)) |
png_handle_gAMA(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_hIST_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_hIST, 4)) |
png_handle_hIST(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_oFFs_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_oFFs, 4)) |
png_handle_oFFs(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_pCAL_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_pCAL, 4)) |
png_handle_pCAL(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sCAL_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sCAL, 4)) |
png_handle_sCAL(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_pHYs_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_pHYs, 4)) |
png_handle_pHYs(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sBIT_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sBIT, 4)) |
png_handle_sBIT(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sRGB_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sRGB, 4)) |
png_handle_sRGB(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_iCCP_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_iCCP, 4)) |
png_handle_iCCP(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sPLT_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sPLT, 4)) |
png_handle_sPLT(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_tEXt_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_tEXt, 4)) |
png_handle_tEXt(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_tIME_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_tIME, 4)) |
png_handle_tIME(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_tRNS_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_tRNS, 4)) |
png_handle_tRNS(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_zTXt_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_zTXt, 4)) |
png_handle_zTXt(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_iTXt_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_iTXt, 4)) |
png_handle_iTXt(png_ptr, info_ptr, length); |
#endif |
else |
png_handle_unknown(png_ptr, info_ptr, length); |
} |
} |
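/* A hypothetical application-side illustration of the signature handling |
 * described above: check the first 8 bytes yourself, then tell libpng how |
 * many were already consumed before calling png_read_info(). Not part of |
 * libpng. |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static int start_reading_png(FILE *fp, png_structp png_ptr, png_infop info_ptr) |
{ |
    png_byte header[8]; |
    if (fread(header, 1, 8, fp) != 8) |
        return 0; |
    if (png_sig_cmp(header, 0, 8)) |
        return 0;                      /* not a PNG signature */ |
    png_init_io(png_ptr, fp); |
    png_set_sig_bytes(png_ptr, 8);     /* 8 signature bytes already read */ |
    png_read_info(png_ptr, info_ptr);  /* reads up to the first IDAT */ |
    return 1; |
} |
#endif |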
/* optional call to update the user's info_ptr structure */ |
void PNGAPI |
png_read_update_info(png_structp png_ptr, png_infop info_ptr) |
{ |
png_debug(1, "in png_read_update_info\n"); |
if (!(png_ptr->flags & PNG_FLAG_ROW_INIT)) |
png_read_start_row(png_ptr); |
else |
png_warning(png_ptr, |
"Ignoring extra png_read_update_info() call; row buffer not reallocated"); |
png_read_transform_info(png_ptr, info_ptr); |
} |
/* Initialize palette, background, etc, after transformations |
* are set, but before any reading takes place. This allows |
* the user to obtain a gamma-corrected palette, for example. |
* If the user doesn't call this, we will do it ourselves. |
*/ |
void PNGAPI |
png_start_read_image(png_structp png_ptr) |
{ |
png_debug(1, "in png_start_read_image\n"); |
if (!(png_ptr->flags & PNG_FLAG_ROW_INIT)) |
png_read_start_row(png_ptr); |
} |
void PNGAPI |
png_read_row(png_structp png_ptr, png_bytep row, png_bytep dsp_row) |
{ |
#ifdef PNG_USE_LOCAL_ARRAYS |
PNG_IDAT; |
const int png_pass_dsp_mask[7] = {0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff}; |
const int png_pass_mask[7] = {0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff}; |
#endif |
int ret; |
png_debug2(1, "in png_read_row (row %lu, pass %d)\n", |
png_ptr->row_number, png_ptr->pass); |
if (!(png_ptr->flags & PNG_FLAG_ROW_INIT)) |
png_read_start_row(png_ptr); |
if (png_ptr->row_number == 0 && png_ptr->pass == 0) |
{ |
/* check for transforms that have been set but were defined out */ |
#if defined(PNG_WRITE_INVERT_SUPPORTED) && !defined(PNG_READ_INVERT_SUPPORTED) |
if (png_ptr->transformations & PNG_INVERT_MONO) |
png_warning(png_ptr, "PNG_READ_INVERT_SUPPORTED is not defined."); |
#endif |
#if defined(PNG_WRITE_FILLER_SUPPORTED) && !defined(PNG_READ_FILLER_SUPPORTED) |
if (png_ptr->transformations & PNG_FILLER) |
png_warning(png_ptr, "PNG_READ_FILLER_SUPPORTED is not defined."); |
#endif |
#if defined(PNG_WRITE_PACKSWAP_SUPPORTED) && !defined(PNG_READ_PACKSWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_PACKSWAP) |
png_warning(png_ptr, "PNG_READ_PACKSWAP_SUPPORTED is not defined."); |
#endif |
#if defined(PNG_WRITE_PACK_SUPPORTED) && !defined(PNG_READ_PACK_SUPPORTED) |
if (png_ptr->transformations & PNG_PACK) |
png_warning(png_ptr, "PNG_READ_PACK_SUPPORTED is not defined."); |
#endif |
#if defined(PNG_WRITE_SHIFT_SUPPORTED) && !defined(PNG_READ_SHIFT_SUPPORTED) |
if (png_ptr->transformations & PNG_SHIFT) |
png_warning(png_ptr, "PNG_READ_SHIFT_SUPPORTED is not defined."); |
#endif |
#if defined(PNG_WRITE_BGR_SUPPORTED) && !defined(PNG_READ_BGR_SUPPORTED) |
if (png_ptr->transformations & PNG_BGR) |
png_warning(png_ptr, "PNG_READ_BGR_SUPPORTED is not defined."); |
#endif |
#if defined(PNG_WRITE_SWAP_SUPPORTED) && !defined(PNG_READ_SWAP_SUPPORTED) |
if (png_ptr->transformations & PNG_SWAP_BYTES) |
png_warning(png_ptr, "PNG_READ_SWAP_SUPPORTED is not defined."); |
#endif |
} |
#if defined(PNG_READ_INTERLACING_SUPPORTED) |
/* if interlaced and we do not need a new row, combine row and return */ |
if (png_ptr->interlaced && (png_ptr->transformations & PNG_INTERLACE)) |
{ |
switch (png_ptr->pass) |
{ |
case 0: |
if (png_ptr->row_number & 0x07) |
{ |
if (dsp_row != NULL) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
case 1: |
if ((png_ptr->row_number & 0x07) || png_ptr->width < 5) |
{ |
if (dsp_row != NULL) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
case 2: |
if ((png_ptr->row_number & 0x07) != 4) |
{ |
if (dsp_row != NULL && (png_ptr->row_number & 4)) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
case 3: |
if ((png_ptr->row_number & 3) || png_ptr->width < 3) |
{ |
if (dsp_row != NULL) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
case 4: |
if ((png_ptr->row_number & 3) != 2) |
{ |
if (dsp_row != NULL && (png_ptr->row_number & 2)) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
case 5: |
if ((png_ptr->row_number & 1) || png_ptr->width < 2) |
{ |
if (dsp_row != NULL) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
case 6: |
if (!(png_ptr->row_number & 1)) |
{ |
png_read_finish_row(png_ptr); |
return; |
} |
break; |
} |
} |
#endif |
if (!(png_ptr->mode & PNG_HAVE_IDAT)) |
png_error(png_ptr, "Invalid attempt to read row data"); |
png_ptr->zstream.next_out = png_ptr->row_buf; |
png_ptr->zstream.avail_out = (uInt)png_ptr->irowbytes; |
do |
{ |
if (!(png_ptr->zstream.avail_in)) |
{ |
while (!png_ptr->idat_size) |
{ |
png_byte chunk_length[4]; |
png_crc_finish(png_ptr, 0); |
png_read_data(png_ptr, chunk_length, 4); |
png_ptr->idat_size = png_get_uint_32(chunk_length); |
if (png_ptr->idat_size > PNG_MAX_UINT) |
png_error(png_ptr, "Invalid chunk length."); |
png_reset_crc(png_ptr); |
png_crc_read(png_ptr, png_ptr->chunk_name, 4); |
if (png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) |
png_error(png_ptr, "Not enough image data"); |
} |
png_ptr->zstream.avail_in = (uInt)png_ptr->zbuf_size; |
png_ptr->zstream.next_in = png_ptr->zbuf; |
if (png_ptr->zbuf_size > png_ptr->idat_size) |
png_ptr->zstream.avail_in = (uInt)png_ptr->idat_size; |
png_crc_read(png_ptr, png_ptr->zbuf, |
(png_size_t)png_ptr->zstream.avail_in); |
png_ptr->idat_size -= png_ptr->zstream.avail_in; |
} |
ret = inflate(&png_ptr->zstream, Z_PARTIAL_FLUSH); |
if (ret == Z_STREAM_END) |
{ |
if (png_ptr->zstream.avail_out || png_ptr->zstream.avail_in || |
png_ptr->idat_size) |
png_error(png_ptr, "Extra compressed data"); |
png_ptr->mode |= PNG_AFTER_IDAT; |
png_ptr->flags |= PNG_FLAG_ZLIB_FINISHED; |
break; |
} |
if (ret != Z_OK) |
png_error(png_ptr, png_ptr->zstream.msg ? png_ptr->zstream.msg : |
"Decompression error"); |
} while (png_ptr->zstream.avail_out); |
png_ptr->row_info.color_type = png_ptr->color_type; |
png_ptr->row_info.width = png_ptr->iwidth; |
png_ptr->row_info.channels = png_ptr->channels; |
png_ptr->row_info.bit_depth = png_ptr->bit_depth; |
png_ptr->row_info.pixel_depth = png_ptr->pixel_depth; |
png_ptr->row_info.rowbytes = ((png_ptr->row_info.width * |
(png_uint_32)png_ptr->row_info.pixel_depth + 7) >> 3); |
if(png_ptr->row_buf[0]) |
png_read_filter_row(png_ptr, &(png_ptr->row_info), |
png_ptr->row_buf + 1, png_ptr->prev_row + 1, |
(int)(png_ptr->row_buf[0])); |
png_memcpy_check(png_ptr, png_ptr->prev_row, png_ptr->row_buf, |
png_ptr->rowbytes + 1); |
#if defined(PNG_MNG_FEATURES_SUPPORTED) |
if((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) && |
(png_ptr->filter_type == PNG_INTRAPIXEL_DIFFERENCING)) |
{ |
/* Intrapixel differencing */ |
png_do_read_intrapixel(&(png_ptr->row_info), png_ptr->row_buf + 1); |
} |
#endif |
if (png_ptr->transformations) |
png_do_read_transformations(png_ptr); |
#if defined(PNG_READ_INTERLACING_SUPPORTED) |
/* blow up interlaced rows to full size */ |
if (png_ptr->interlaced && |
(png_ptr->transformations & PNG_INTERLACE)) |
{ |
if (png_ptr->pass < 6) |
/* old interface (pre-1.0.9): |
png_do_read_interlace(&(png_ptr->row_info), |
png_ptr->row_buf + 1, png_ptr->pass, png_ptr->transformations); |
*/ |
png_do_read_interlace(png_ptr); |
if (dsp_row != NULL) |
png_combine_row(png_ptr, dsp_row, |
png_pass_dsp_mask[png_ptr->pass]); |
if (row != NULL) |
png_combine_row(png_ptr, row, |
png_pass_mask[png_ptr->pass]); |
} |
else |
#endif |
{ |
if (row != NULL) |
png_combine_row(png_ptr, row, 0xff); |
if (dsp_row != NULL) |
png_combine_row(png_ptr, dsp_row, 0xff); |
} |
png_read_finish_row(png_ptr); |
if (png_ptr->read_row_fn != NULL) |
(*(png_ptr->read_row_fn))(png_ptr, png_ptr->row_number, png_ptr->pass); |
} |
/* Read one or more rows of image data. If the image is interlaced, |
* and png_set_interlace_handling() has been called, the rows need to |
* contain the contents of the rows from the previous pass. If the |
* image has alpha or transparency, and png_handle_alpha()[*] has been |
* called, the rows contents must be initialized to the contents of the |
* screen. |
* |
* "row" holds the actual image, and pixels are placed in it |
* as they arrive. If the image is displayed after each pass, it will |
* appear to "sparkle" in. "display_row" can be used to display a |
* "chunky" progressive image, with finer detail added as it becomes |
* available. If you do not want this "chunky" display, you may pass |
* NULL for display_row. If you do not want the sparkle display, and |
* you have not called png_handle_alpha(), you may pass NULL for rows. |
* If you have called png_handle_alpha(), and the image has either an |
* alpha channel or a transparency chunk, you must provide a buffer for |
* rows. In this case, you do not have to provide a display_row buffer |
* also, but you may. If the image is not interlaced, or if you have |
* not called png_set_interlace_handling(), the display_row buffer will |
* be ignored, so pass NULL to it. |
* |
* [*] png_handle_alpha() does not exist yet, as of libpng version 1.2.5 |
*/ |
void PNGAPI |
png_read_rows(png_structp png_ptr, png_bytepp row, |
png_bytepp display_row, png_uint_32 num_rows) |
{ |
png_uint_32 i; |
png_bytepp rp; |
png_bytepp dp; |
png_debug(1, "in png_read_rows\n"); |
rp = row; |
dp = display_row; |
if (rp != NULL && dp != NULL) |
for (i = 0; i < num_rows; i++) |
{ |
png_bytep rptr = *rp++; |
png_bytep dptr = *dp++; |
png_read_row(png_ptr, rptr, dptr); |
} |
else if(rp != NULL) |
for (i = 0; i < num_rows; i++) |
{ |
png_bytep rptr = *rp; |
png_read_row(png_ptr, rptr, png_bytep_NULL); |
rp++; |
} |
else if(dp != NULL) |
for (i = 0; i < num_rows; i++) |
{ |
png_bytep dptr = *dp; |
png_read_row(png_ptr, png_bytep_NULL, dptr); |
dp++; |
} |
} |
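/* A hypothetical sketch of the per-pass reading pattern described above for an |
 * interlaced image (application code, not part of libpng). It assumes |
 * png_read_info() has already been called; passing the row pointers on every |
 * pass produces the "sparkle" effect, while passing them as display_row |
 * instead would give the "chunky" rectangles. |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static void read_interlaced_sparkle(png_structp png_ptr, png_infop info_ptr, |
                                    png_bytepp rows, png_uint_32 height) |
{ |
    int passes = png_set_interlace_handling(png_ptr);  /* 7 for Adam7 */ |
    int p; |
    png_read_update_info(png_ptr, info_ptr); |
    for (p = 0; p < passes; p++) |
        png_read_rows(png_ptr, rows, NULL, height);    /* no display buffer */ |
} |
#endif |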
/* Read the entire image. If the image has an alpha channel or a tRNS |
* chunk, and you have called png_handle_alpha()[*], you will need to |
* initialize the image to the current image that PNG will be overlaying. |
* We set the num_rows again here, in case it was incorrectly set in |
* png_read_start_row() by a call to png_read_update_info() or |
* png_start_read_image() if png_set_interlace_handling() wasn't called |
* prior to either of these functions like it should have been. You can |
* only call this function once. If you desire to have an image for |
* each pass of a interlaced image, use png_read_rows() instead. |
* |
* [*] png_handle_alpha() does not exist yet, as of libpng version 1.2.5 |
*/ |
void PNGAPI |
png_read_image(png_structp png_ptr, png_bytepp image) |
{ |
png_uint_32 i,image_height; |
int pass, j; |
png_bytepp rp; |
png_debug(1, "in png_read_image\n"); |
#ifdef PNG_READ_INTERLACING_SUPPORTED |
pass = png_set_interlace_handling(png_ptr); |
#else |
if (png_ptr->interlaced) |
png_error(png_ptr, |
"Cannot read interlaced image -- interlace handler disabled."); |
pass = 1; |
#endif |
image_height=png_ptr->height; |
png_ptr->num_rows = image_height; /* Make sure this is set correctly */ |
for (j = 0; j < pass; j++) |
{ |
rp = image; |
for (i = 0; i < image_height; i++) |
{ |
png_read_row(png_ptr, *rp, png_bytep_NULL); |
rp++; |
} |
} |
} |
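/* A hypothetical sketch of preparing row pointers for png_read_image() once |
 * png_read_info()/png_read_update_info() have been called (application code, |
 * not part of libpng). |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static png_bytepp alloc_and_read_image(png_structp png_ptr, png_infop info_ptr) |
{ |
    png_uint_32 height = png_get_image_height(png_ptr, info_ptr); |
    png_uint_32 rowbytes = png_get_rowbytes(png_ptr, info_ptr); |
    png_bytepp rows = (png_bytepp)png_malloc(png_ptr, |
        height * sizeof(png_bytep)); |
    png_uint_32 y; |
    for (y = 0; y < height; y++) |
        rows[y] = (png_bytep)png_malloc(png_ptr, rowbytes); |
    png_read_image(png_ptr, rows);   /* reads every pass of every row */ |
    return rows; |
} |
#endif |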
/* Read the end of the PNG file. Will not read past the end of the |
* file, will verify the end is accurate, and will read any comments |
* or time information at the end of the file, if info is not NULL. |
*/ |
void PNGAPI |
png_read_end(png_structp png_ptr, png_infop info_ptr) |
{ |
png_byte chunk_length[4]; |
png_uint_32 length; |
png_debug(1, "in png_read_end\n"); |
png_crc_finish(png_ptr, 0); /* Finish off CRC from last IDAT chunk */ |
do |
{ |
#ifdef PNG_USE_LOCAL_ARRAYS |
PNG_IHDR; |
PNG_IDAT; |
PNG_IEND; |
PNG_PLTE; |
#if defined(PNG_READ_bKGD_SUPPORTED) |
PNG_bKGD; |
#endif |
#if defined(PNG_READ_cHRM_SUPPORTED) |
PNG_cHRM; |
#endif |
#if defined(PNG_READ_gAMA_SUPPORTED) |
PNG_gAMA; |
#endif |
#if defined(PNG_READ_hIST_SUPPORTED) |
PNG_hIST; |
#endif |
#if defined(PNG_READ_iCCP_SUPPORTED) |
PNG_iCCP; |
#endif |
#if defined(PNG_READ_iTXt_SUPPORTED) |
PNG_iTXt; |
#endif |
#if defined(PNG_READ_oFFs_SUPPORTED) |
PNG_oFFs; |
#endif |
#if defined(PNG_READ_pCAL_SUPPORTED) |
PNG_pCAL; |
#endif |
#if defined(PNG_READ_pHYs_SUPPORTED) |
PNG_pHYs; |
#endif |
#if defined(PNG_READ_sBIT_SUPPORTED) |
PNG_sBIT; |
#endif |
#if defined(PNG_READ_sCAL_SUPPORTED) |
PNG_sCAL; |
#endif |
#if defined(PNG_READ_sPLT_SUPPORTED) |
PNG_sPLT; |
#endif |
#if defined(PNG_READ_sRGB_SUPPORTED) |
PNG_sRGB; |
#endif |
#if defined(PNG_READ_tEXt_SUPPORTED) |
PNG_tEXt; |
#endif |
#if defined(PNG_READ_tIME_SUPPORTED) |
PNG_tIME; |
#endif |
#if defined(PNG_READ_tRNS_SUPPORTED) |
PNG_tRNS; |
#endif |
#if defined(PNG_READ_zTXt_SUPPORTED) |
PNG_zTXt; |
#endif |
#endif /* PNG_USE_LOCAL_ARRAYS */ |
png_read_data(png_ptr, chunk_length, 4); |
length = png_get_uint_32(chunk_length); |
png_reset_crc(png_ptr); |
png_crc_read(png_ptr, png_ptr->chunk_name, 4); |
png_debug1(0, "Reading %s chunk.\n", png_ptr->chunk_name); |
if (length > PNG_MAX_UINT) |
png_error(png_ptr, "Invalid chunk length."); |
if (!png_memcmp(png_ptr->chunk_name, png_IHDR, 4)) |
png_handle_IHDR(png_ptr, info_ptr, length); |
else if (!png_memcmp(png_ptr->chunk_name, png_IEND, 4)) |
png_handle_IEND(png_ptr, info_ptr, length); |
#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED |
else if (png_handle_as_unknown(png_ptr, png_ptr->chunk_name)) |
{ |
if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) |
{ |
if (length > 0 || png_ptr->mode & PNG_AFTER_IDAT) |
png_error(png_ptr, "Too many IDAT's found"); |
} |
else |
png_ptr->mode |= PNG_AFTER_IDAT; |
png_handle_unknown(png_ptr, info_ptr, length); |
if (!png_memcmp(png_ptr->chunk_name, png_PLTE, 4)) |
png_ptr->mode |= PNG_HAVE_PLTE; |
} |
#endif |
else if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) |
{ |
/* Zero length IDATs are legal after the last IDAT has been |
* read, but not after other chunks have been read. |
*/ |
if (length > 0 || png_ptr->mode & PNG_AFTER_IDAT) |
png_error(png_ptr, "Too many IDAT's found"); |
png_crc_finish(png_ptr, length); |
} |
else if (!png_memcmp(png_ptr->chunk_name, png_PLTE, 4)) |
png_handle_PLTE(png_ptr, info_ptr, length); |
#if defined(PNG_READ_bKGD_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_bKGD, 4)) |
png_handle_bKGD(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_cHRM_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_cHRM, 4)) |
png_handle_cHRM(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_gAMA_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_gAMA, 4)) |
png_handle_gAMA(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_hIST_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_hIST, 4)) |
png_handle_hIST(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_oFFs_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_oFFs, 4)) |
png_handle_oFFs(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_pCAL_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_pCAL, 4)) |
png_handle_pCAL(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sCAL_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sCAL, 4)) |
png_handle_sCAL(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_pHYs_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_pHYs, 4)) |
png_handle_pHYs(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sBIT_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sBIT, 4)) |
png_handle_sBIT(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sRGB_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sRGB, 4)) |
png_handle_sRGB(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_iCCP_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_iCCP, 4)) |
png_handle_iCCP(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_sPLT_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_sPLT, 4)) |
png_handle_sPLT(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_tEXt_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_tEXt, 4)) |
png_handle_tEXt(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_tIME_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_tIME, 4)) |
png_handle_tIME(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_tRNS_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_tRNS, 4)) |
png_handle_tRNS(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_zTXt_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_zTXt, 4)) |
png_handle_zTXt(png_ptr, info_ptr, length); |
#endif |
#if defined(PNG_READ_iTXt_SUPPORTED) |
else if (!png_memcmp(png_ptr->chunk_name, png_iTXt, 4)) |
png_handle_iTXt(png_ptr, info_ptr, length); |
#endif |
else |
png_handle_unknown(png_ptr, info_ptr, length); |
} while (!(png_ptr->mode & PNG_HAVE_IEND)); |
} |
/* free all memory used by the read */ |
void PNGAPI |
png_destroy_read_struct(png_structpp png_ptr_ptr, png_infopp info_ptr_ptr, |
png_infopp end_info_ptr_ptr) |
{ |
png_structp png_ptr = NULL; |
png_infop info_ptr = NULL, end_info_ptr = NULL; |
#ifdef PNG_USER_MEM_SUPPORTED |
png_free_ptr free_fn = NULL; |
png_voidp mem_ptr = NULL; |
#endif |
png_debug(1, "in png_destroy_read_struct\n"); |
if (png_ptr_ptr != NULL) |
png_ptr = *png_ptr_ptr; |
if (info_ptr_ptr != NULL) |
info_ptr = *info_ptr_ptr; |
if (end_info_ptr_ptr != NULL) |
end_info_ptr = *end_info_ptr_ptr; |
#ifdef PNG_USER_MEM_SUPPORTED |
if (png_ptr != NULL) |
{ |
free_fn = png_ptr->free_fn; |
mem_ptr = png_ptr->mem_ptr; |
} |
#endif |
png_read_destroy(png_ptr, info_ptr, end_info_ptr); |
if (info_ptr != NULL) |
{ |
#if defined(PNG_TEXT_SUPPORTED) |
png_free_data(png_ptr, info_ptr, PNG_FREE_TEXT, -1); |
#endif |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2((png_voidp)info_ptr, (png_free_ptr)free_fn, |
(png_voidp)mem_ptr); |
#else |
png_destroy_struct((png_voidp)info_ptr); |
#endif |
*info_ptr_ptr = NULL; |
} |
if (end_info_ptr != NULL) |
{ |
#if defined(PNG_READ_TEXT_SUPPORTED) |
png_free_data(png_ptr, end_info_ptr, PNG_FREE_TEXT, -1); |
#endif |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2((png_voidp)end_info_ptr, (png_free_ptr)free_fn, |
(png_voidp)mem_ptr); |
#else |
png_destroy_struct((png_voidp)end_info_ptr); |
#endif |
*end_info_ptr_ptr = NULL; |
} |
if (png_ptr != NULL) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2((png_voidp)png_ptr, (png_free_ptr)free_fn, |
(png_voidp)mem_ptr); |
#else |
png_destroy_struct((png_voidp)png_ptr); |
#endif |
*png_ptr_ptr = NULL; |
} |
} |
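/* A hypothetical teardown sequence matching png_read_end() and |
 * png_destroy_read_struct() above (application code, not part of libpng): |
 * finish the stream, then release everything. |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static void finish_png_reader(png_structpp pp, png_infopp ip) |
{ |
    png_read_end(*pp, *ip);   /* pick up trailing chunks (tEXt, tIME, ...) */ |
    png_destroy_read_struct(pp, ip, (png_infopp)NULL);  /* frees and NULLs */ |
} |
#endif |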
/* free all memory used by the read (old method) */ |
void /* PRIVATE */ |
png_read_destroy(png_structp png_ptr, png_infop info_ptr, png_infop end_info_ptr) |
{ |
#ifdef PNG_SETJMP_SUPPORTED |
jmp_buf tmp_jmp; |
#endif |
png_error_ptr error_fn; |
png_error_ptr warning_fn; |
png_voidp error_ptr; |
#ifdef PNG_USER_MEM_SUPPORTED |
png_free_ptr free_fn; |
#endif |
png_debug(1, "in png_read_destroy\n"); |
if (info_ptr != NULL) |
png_info_destroy(png_ptr, info_ptr); |
if (end_info_ptr != NULL) |
png_info_destroy(png_ptr, end_info_ptr); |
png_free(png_ptr, png_ptr->zbuf); |
png_free(png_ptr, png_ptr->big_row_buf); |
png_free(png_ptr, png_ptr->prev_row); |
#if defined(PNG_READ_DITHER_SUPPORTED) |
png_free(png_ptr, png_ptr->palette_lookup); |
png_free(png_ptr, png_ptr->dither_index); |
#endif |
#if defined(PNG_READ_GAMMA_SUPPORTED) |
png_free(png_ptr, png_ptr->gamma_table); |
#endif |
#if defined(PNG_READ_BACKGROUND_SUPPORTED) |
png_free(png_ptr, png_ptr->gamma_from_1); |
png_free(png_ptr, png_ptr->gamma_to_1); |
#endif |
#ifdef PNG_FREE_ME_SUPPORTED |
if (png_ptr->free_me & PNG_FREE_PLTE) |
png_zfree(png_ptr, png_ptr->palette); |
png_ptr->free_me &= ~PNG_FREE_PLTE; |
#else |
if (png_ptr->flags & PNG_FLAG_FREE_PLTE) |
png_zfree(png_ptr, png_ptr->palette); |
png_ptr->flags &= ~PNG_FLAG_FREE_PLTE; |
#endif |
#if defined(PNG_tRNS_SUPPORTED) || \ |
defined(PNG_READ_EXPAND_SUPPORTED) || defined(PNG_READ_BACKGROUND_SUPPORTED) |
#ifdef PNG_FREE_ME_SUPPORTED |
if (png_ptr->free_me & PNG_FREE_TRNS) |
png_free(png_ptr, png_ptr->trans); |
png_ptr->free_me &= ~PNG_FREE_TRNS; |
#else |
if (png_ptr->flags & PNG_FLAG_FREE_TRNS) |
png_free(png_ptr, png_ptr->trans); |
png_ptr->flags &= ~PNG_FLAG_FREE_TRNS; |
#endif |
#endif |
#if defined(PNG_READ_hIST_SUPPORTED) |
#ifdef PNG_FREE_ME_SUPPORTED |
if (png_ptr->free_me & PNG_FREE_HIST) |
png_free(png_ptr, png_ptr->hist); |
png_ptr->free_me &= ~PNG_FREE_HIST; |
#else |
if (png_ptr->flags & PNG_FLAG_FREE_HIST) |
png_free(png_ptr, png_ptr->hist); |
png_ptr->flags &= ~PNG_FLAG_FREE_HIST; |
#endif |
#endif |
#if defined(PNG_READ_GAMMA_SUPPORTED) |
if (png_ptr->gamma_16_table != NULL) |
{ |
int i; |
int istop = (1 << (8 - png_ptr->gamma_shift)); |
for (i = 0; i < istop; i++) |
{ |
png_free(png_ptr, png_ptr->gamma_16_table[i]); |
} |
png_free(png_ptr, png_ptr->gamma_16_table); |
} |
#if defined(PNG_READ_BACKGROUND_SUPPORTED) |
if (png_ptr->gamma_16_from_1 != NULL) |
{ |
int i; |
int istop = (1 << (8 - png_ptr->gamma_shift)); |
for (i = 0; i < istop; i++) |
{ |
png_free(png_ptr, png_ptr->gamma_16_from_1[i]); |
} |
png_free(png_ptr, png_ptr->gamma_16_from_1); |
} |
if (png_ptr->gamma_16_to_1 != NULL) |
{ |
int i; |
int istop = (1 << (8 - png_ptr->gamma_shift)); |
for (i = 0; i < istop; i++) |
{ |
png_free(png_ptr, png_ptr->gamma_16_to_1[i]); |
} |
png_free(png_ptr, png_ptr->gamma_16_to_1); |
} |
#endif |
#endif |
#if defined(PNG_TIME_RFC1123_SUPPORTED) |
png_free(png_ptr, png_ptr->time_buffer); |
#endif |
inflateEnd(&png_ptr->zstream); |
#ifdef PNG_PROGRESSIVE_READ_SUPPORTED |
png_free(png_ptr, png_ptr->save_buffer); |
#endif |
#ifdef PNG_PROGRESSIVE_READ_SUPPORTED |
#ifdef PNG_TEXT_SUPPORTED |
png_free(png_ptr, png_ptr->current_text); |
#endif /* PNG_TEXT_SUPPORTED */ |
#endif /* PNG_PROGRESSIVE_READ_SUPPORTED */ |
/* Save the important info out of the png_struct, in case it is |
* being used again. |
*/ |
#ifdef PNG_SETJMP_SUPPORTED |
png_memcpy(tmp_jmp, png_ptr->jmpbuf, sizeof (jmp_buf)); |
#endif |
error_fn = png_ptr->error_fn; |
warning_fn = png_ptr->warning_fn; |
error_ptr = png_ptr->error_ptr; |
#ifdef PNG_USER_MEM_SUPPORTED |
free_fn = png_ptr->free_fn; |
#endif |
png_memset(png_ptr, 0, sizeof (png_struct)); |
png_ptr->error_fn = error_fn; |
png_ptr->warning_fn = warning_fn; |
png_ptr->error_ptr = error_ptr; |
#ifdef PNG_USER_MEM_SUPPORTED |
png_ptr->free_fn = free_fn; |
#endif |
#ifdef PNG_SETJMP_SUPPORTED |
png_memcpy(png_ptr->jmpbuf, tmp_jmp, sizeof (jmp_buf)); |
#endif |
} |
void PNGAPI |
png_set_read_status_fn(png_structp png_ptr, png_read_status_ptr read_row_fn) |
{ |
png_ptr->read_row_fn = read_row_fn; |
} |
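/* A hypothetical progress callback registered through png_set_read_status_fn() |
 * above (application code, not part of libpng); libpng invokes it once per row |
 * after that row has been processed. |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static void on_row_read(png_structp png_ptr, png_uint_32 row, int pass) |
{ |
    /* e.g. update a progress indicator; keep this cheap, it runs per row */ |
    (void)png_ptr; (void)row; (void)pass; |
} |
/* ... after creating the read struct: |
 *     png_set_read_status_fn(png_ptr, on_row_read); |
 */ |
#endif |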
#if defined(PNG_INFO_IMAGE_SUPPORTED) |
void PNGAPI |
png_read_png(png_structp png_ptr, png_infop info_ptr, |
int transforms, |
voidp params) |
{ |
int row; |
#if defined(PNG_READ_INVERT_ALPHA_SUPPORTED) |
/* invert the alpha channel from opacity to transparency */ |
if (transforms & PNG_TRANSFORM_INVERT_ALPHA) |
png_set_invert_alpha(png_ptr); |
#endif |
/* The call to png_read_info() gives us all of the information from the |
* PNG file before the first IDAT (image data chunk). |
*/ |
png_read_info(png_ptr, info_ptr); |
/* -------------- image transformations start here ------------------- */ |
#if defined(PNG_READ_16_TO_8_SUPPORTED) |
/* tell libpng to strip 16 bit/color files down to 8 bits/color */ |
if (transforms & PNG_TRANSFORM_STRIP_16) |
png_set_strip_16(png_ptr); |
#endif |
#if defined(PNG_READ_STRIP_ALPHA_SUPPORTED) |
/* Strip alpha bytes from the input data without combining with the |
* background (not recommended). |
*/ |
if (transforms & PNG_TRANSFORM_STRIP_ALPHA) |
png_set_strip_alpha(png_ptr); |
#endif |
#if defined(PNG_READ_PACK_SUPPORTED) && !defined(PNG_READ_EXPAND_SUPPORTED) |
/* Extract multiple pixels with bit depths of 1, 2, and 4 from a single |
* byte into separate bytes (useful for paletted and grayscale images). |
*/ |
if (transforms & PNG_TRANSFORM_PACKING) |
png_set_packing(png_ptr); |
#endif |
#if defined(PNG_READ_PACKSWAP_SUPPORTED) |
/* Change the order of packed pixels to least significant bit first |
* (not useful if you are using png_set_packing). */ |
if (transforms & PNG_TRANSFORM_PACKSWAP) |
png_set_packswap(png_ptr); |
#endif |
#if defined(PNG_READ_EXPAND_SUPPORTED) |
/* Expand paletted colors into true RGB triplets |
* Expand grayscale images to full 8 bits from 1, 2, or 4 bits/pixel |
* Expand paletted or RGB images with transparency to full alpha |
* channels so the data will be available as RGBA quartets. |
*/ |
if (transforms & PNG_TRANSFORM_EXPAND) |
if ((png_ptr->bit_depth < 8) || |
(png_ptr->color_type == PNG_COLOR_TYPE_PALETTE) || |
(png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS))) |
png_set_expand(png_ptr); |
#endif |
/* We don't handle background color or gamma transformation or dithering. */ |
#if defined(PNG_READ_INVERT_SUPPORTED) |
/* invert monochrome files to have 0 as white and 1 as black */ |
if (transforms & PNG_TRANSFORM_INVERT_MONO) |
png_set_invert_mono(png_ptr); |
#endif |
#if defined(PNG_READ_SHIFT_SUPPORTED) |
/* If you want to shift the pixel values from the range [0,255] or |
* [0,65535] to the original [0,7] or [0,31], or whatever range the |
* colors were originally in: |
*/ |
if ((transforms & PNG_TRANSFORM_SHIFT) |
&& png_get_valid(png_ptr, info_ptr, PNG_INFO_sBIT)) |
{ |
png_color_8p sig_bit; |
png_get_sBIT(png_ptr, info_ptr, &sig_bit); |
png_set_shift(png_ptr, sig_bit); |
} |
#endif |
#if defined(PNG_READ_BGR_SUPPORTED) |
/* flip the RGB pixels to BGR (or RGBA to BGRA) */ |
if (transforms & PNG_TRANSFORM_BGR) |
png_set_bgr(png_ptr); |
#endif |
#if defined(PNG_READ_SWAP_ALPHA_SUPPORTED) |
/* swap the RGBA or GA data to ARGB or AG (or BGRA to ABGR) */ |
if (transforms & PNG_TRANSFORM_SWAP_ALPHA) |
png_set_swap_alpha(png_ptr); |
#endif |
#if defined(PNG_READ_SWAP_SUPPORTED) |
/* swap bytes of 16 bit files to least significant byte first */ |
if (transforms & PNG_TRANSFORM_SWAP_ENDIAN) |
png_set_swap(png_ptr); |
#endif |
/* We don't handle adding filler bytes */ |
/* Optional call to gamma correct and add the background to the palette |
* and update info structure. REQUIRED if you are expecting libpng to |
* update the palette for you (i.e., you selected such a transform above). |
*/ |
png_read_update_info(png_ptr, info_ptr); |
/* -------------- image transformations end here ------------------- */ |
#ifdef PNG_FREE_ME_SUPPORTED |
png_free_data(png_ptr, info_ptr, PNG_FREE_ROWS, 0); |
#endif |
if(info_ptr->row_pointers == NULL) |
{ |
info_ptr->row_pointers = (png_bytepp)png_malloc(png_ptr, |
info_ptr->height * sizeof(png_bytep)); |
#ifdef PNG_FREE_ME_SUPPORTED |
info_ptr->free_me |= PNG_FREE_ROWS; |
#endif |
for (row = 0; row < (int)info_ptr->height; row++) |
{ |
info_ptr->row_pointers[row] = (png_bytep)png_malloc(png_ptr, |
png_get_rowbytes(png_ptr, info_ptr)); |
} |
} |
png_read_image(png_ptr, info_ptr->row_pointers); |
info_ptr->valid |= PNG_INFO_IDAT; |
/* read rest of file, and get additional chunks in info_ptr - REQUIRED */ |
png_read_end(png_ptr, info_ptr); |
/* quiet compiler warnings about unused parameters */ |
if (transforms == 0 || params == NULL) |
return; |
} |
#endif |
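/* A hypothetical one-shot use of the high-level png_read_png() interface above |
 * (application code, not part of libpng). Transform flags are OR-ed together, |
 * and the decoded rows are then fetched with png_get_rows(). |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static png_bytepp read_whole_image(png_structp png_ptr, png_infop info_ptr) |
{ |
    png_read_png(png_ptr, info_ptr, |
        PNG_TRANSFORM_STRIP_16 | PNG_TRANSFORM_EXPAND,  /* 8-bit, expanded */ |
        NULL);                                          /* params is unused */ |
    return png_get_rows(png_ptr, info_ptr);  /* rows remain owned by libpng */ |
} |
#endif |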
/shark/trunk/ports/png/trees.c |
---|
0,0 → 1,1214 |
/* trees.c -- output deflated data using Huffman coding |
* Copyright (C) 1995-2002 Jean-loup Gailly |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* |
* ALGORITHM |
* |
* The "deflation" process uses several Huffman trees. The more |
* common source values are represented by shorter bit sequences. |
* |
* Each code tree is stored in a compressed form which is itself |
* a Huffman encoding of the lengths of all the code strings (in |
* ascending order by source values). The actual code strings are |
* reconstructed from the lengths in the inflate process, as described |
* in the deflate specification. |
* |
* REFERENCES |
* |
* Deutsch, L.P., "'Deflate' Compressed Data Format Specification". |
* Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc |
* |
* Storer, James A. |
* Data Compression: Methods and Theory, pp. 49-50. |
* Computer Science Press, 1988. ISBN 0-7167-8156-5. |
* |
* Sedgewick, R. |
* Algorithms, p290. |
* Addison-Wesley, 1983. ISBN 0-201-06672-6. |
*/ |
/* @(#) $Id: trees.c,v 1.1 2003-03-20 13:08:13 giacomo Exp $ */ |
/* #define GEN_TREES_H */ |
#include "deflate.h" |
#ifdef DEBUG |
# include <ctype.h> |
#endif |
/* =========================================================================== |
* Constants |
*/ |
#define MAX_BL_BITS 7 |
/* Bit length codes must not exceed MAX_BL_BITS bits */ |
#define END_BLOCK 256 |
/* end of block literal code */ |
#define REP_3_6 16 |
/* repeat previous bit length 3-6 times (2 bits of repeat count) */ |
#define REPZ_3_10 17 |
/* repeat a zero length 3-10 times (3 bits of repeat count) */ |
#define REPZ_11_138 18 |
/* repeat a zero length 11-138 times (7 bits of repeat count) */ |
local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ |
= {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; |
local const int extra_dbits[D_CODES] /* extra bits for each distance code */ |
= {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; |
local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ |
= {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; |
local const uch bl_order[BL_CODES] |
= {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; |
/* The lengths of the bit length codes are sent in order of decreasing |
* probability, to avoid transmitting the lengths for unused bit length codes. |
*/ |
#define Buf_size (8 * 2*sizeof(char)) |
/* Number of bits used within bi_buf. (bi_buf might be implemented on |
* more than 16 bits on some systems.) |
*/ |
/* =========================================================================== |
* Local data. These are initialized only once. |
*/ |
#define DIST_CODE_LEN 512 /* see definition of array dist_code below */ |
#if defined(GEN_TREES_H) || !defined(STDC) |
/* non ANSI compilers may not accept trees.h */ |
local ct_data static_ltree[L_CODES+2]; |
/* The static literal tree. Since the bit lengths are imposed, there is no |
* need for the L_CODES extra codes used during heap construction. However, |
* the codes 286 and 287 are needed to build a canonical tree (see _tr_init |
* below). |
*/ |
local ct_data static_dtree[D_CODES]; |
/* The static distance tree. (Actually a trivial tree since all codes use |
* 5 bits.) |
*/ |
uch _dist_code[DIST_CODE_LEN]; |
/* Distance codes. The first 256 values correspond to the distances |
* 3 .. 258, the last 256 values correspond to the top 8 bits of |
* the 15 bit distances. |
*/ |
uch _length_code[MAX_MATCH-MIN_MATCH+1]; |
/* length code for each normalized match length (0 == MIN_MATCH) */ |
local int base_length[LENGTH_CODES]; |
/* First normalized length for each code (0 = MIN_MATCH) */ |
local int base_dist[D_CODES]; |
/* First normalized distance for each code (0 = distance of 1) */ |
#else |
# include "trees.h" |
#endif /* GEN_TREES_H */ |
struct static_tree_desc_s { |
const ct_data *static_tree; /* static tree or NULL */ |
const intf *extra_bits; /* extra bits for each code or NULL */ |
int extra_base; /* base index for extra_bits */ |
int elems; /* max number of elements in the tree */ |
int max_length; /* max bit length for the codes */ |
}; |
local static_tree_desc static_l_desc = |
{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; |
local static_tree_desc static_d_desc = |
{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; |
local static_tree_desc static_bl_desc = |
{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; |
/* =========================================================================== |
* Local (static) routines in this file. |
*/ |
local void tr_static_init OF((void)); |
local void init_block OF((deflate_state *s)); |
local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); |
local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); |
local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); |
local void build_tree OF((deflate_state *s, tree_desc *desc)); |
local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); |
local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); |
local int build_bl_tree OF((deflate_state *s)); |
local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, |
int blcodes)); |
local void compress_block OF((deflate_state *s, ct_data *ltree, |
ct_data *dtree)); |
local void set_data_type OF((deflate_state *s)); |
local unsigned bi_reverse OF((unsigned value, int length)); |
local void bi_windup OF((deflate_state *s)); |
local void bi_flush OF((deflate_state *s)); |
local void copy_block OF((deflate_state *s, charf *buf, unsigned len, |
int header)); |
#ifdef GEN_TREES_H |
local void gen_trees_header OF((void)); |
#endif |
#ifndef DEBUG |
# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) |
/* Send a code of the given tree. c and tree must not have side effects */ |
#else /* DEBUG */ |
# define send_code(s, c, tree) \ |
{ if (z_verbose>2) cprintf("\ncd %3d ",(c)); \ |
send_bits(s, tree[c].Code, tree[c].Len); } |
#endif |
/* =========================================================================== |
* Output a short LSB first on the stream. |
* IN assertion: there is enough room in pendingBuf. |
*/ |
#define put_short(s, w) { \ |
put_byte(s, (uch)((w) & 0xff)); \ |
put_byte(s, (uch)((ush)(w) >> 8)); \ |
} |
/* =========================================================================== |
* Send a value on a given number of bits. |
* IN assertion: length <= 16 and value fits in length bits. |
*/ |
#ifdef DEBUG |
local void send_bits OF((deflate_state *s, int value, int length)); |
local void send_bits(s, value, length) |
deflate_state *s; |
int value; /* value to send */ |
int length; /* number of bits */ |
{ |
Tracevv((stderr," l %2d v %4x ", length, value)); |
Assert(length > 0 && length <= 15, "invalid length"); |
s->bits_sent += (ulg)length; |
/* If not enough room in bi_buf, use (valid) bits from bi_buf and |
* (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) |
* unused bits in value. |
*/ |
if (s->bi_valid > (int)Buf_size - length) { |
s->bi_buf |= (value << s->bi_valid); |
put_short(s, s->bi_buf); |
s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); |
s->bi_valid += length - Buf_size; |
} else { |
s->bi_buf |= value << s->bi_valid; |
s->bi_valid += length; |
} |
} |
#else /* !DEBUG */ |
#define send_bits(s, value, length) \ |
{ int len = length;\ |
if (s->bi_valid > (int)Buf_size - len) {\ |
int val = value;\ |
s->bi_buf |= (val << s->bi_valid);\ |
put_short(s, s->bi_buf);\ |
s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ |
s->bi_valid += len - Buf_size;\ |
} else {\ |
s->bi_buf |= (value) << s->bi_valid;\ |
s->bi_valid += len;\ |
}\ |
} |
#endif /* DEBUG */ |
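/* A standalone illustration of the LSB-first bit packing that send_bits() and |
 * put_short() perform (illustrative sketch only, not zlib's implementation: |
 * here the accumulator is a 32-bit unsigned long flushed 16 bits at a time, |
 * low byte first, which is the same output order the macros produce). |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static void pack_bits(unsigned char *out, unsigned long *outlen, |
                      unsigned long *acc, int *filled, |
                      unsigned value, int length) |
{ |
    *acc |= (unsigned long)value << *filled;  /* append above the held bits */ |
    *filled += length; |
    while (*filled >= 16) {                   /* emit a full 16-bit word */ |
        out[(*outlen)++] = (unsigned char)(*acc & 0xff);        /* low byte */ |
        out[(*outlen)++] = (unsigned char)((*acc >> 8) & 0xff); /* high byte */ |
        *acc >>= 16; |
        *filled -= 16; |
    } |
} |
#endif |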
#define MAX(a,b) ((a) >= (b) ? (a) : (b)) |
/* the arguments must not have side effects */ |
/* =========================================================================== |
* Initialize the various 'constant' tables. |
*/ |
local void tr_static_init() |
{ |
#if defined(GEN_TREES_H) || !defined(STDC) |
static int static_init_done = 0; |
int n; /* iterates over tree elements */ |
int bits; /* bit counter */ |
int length; /* length value */ |
int code; /* code value */ |
int dist; /* distance index */ |
ush bl_count[MAX_BITS+1]; |
/* number of codes at each bit length for an optimal tree */ |
if (static_init_done) return; |
/* For some embedded targets, global variables are not initialized: */ |
static_l_desc.static_tree = static_ltree; |
static_l_desc.extra_bits = extra_lbits; |
static_d_desc.static_tree = static_dtree; |
static_d_desc.extra_bits = extra_dbits; |
static_bl_desc.extra_bits = extra_blbits; |
/* Initialize the mapping length (0..255) -> length code (0..28) */ |
length = 0; |
for (code = 0; code < LENGTH_CODES-1; code++) { |
base_length[code] = length; |
for (n = 0; n < (1<<extra_lbits[code]); n++) { |
_length_code[length++] = (uch)code; |
} |
} |
Assert (length == 256, "tr_static_init: length != 256"); |
/* Note that the length 255 (match length 258) can be represented |
* in two different ways: code 284 + 5 bits or code 285, so we |
* overwrite length_code[255] to use the best encoding: |
*/ |
_length_code[length-1] = (uch)code; |
/* Initialize the mapping dist (0..32K) -> dist code (0..29) */ |
dist = 0; |
for (code = 0 ; code < 16; code++) { |
base_dist[code] = dist; |
for (n = 0; n < (1<<extra_dbits[code]); n++) { |
_dist_code[dist++] = (uch)code; |
} |
} |
Assert (dist == 256, "tr_static_init: dist != 256"); |
dist >>= 7; /* from now on, all distances are divided by 128 */ |
for ( ; code < D_CODES; code++) { |
base_dist[code] = dist << 7; |
for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { |
_dist_code[256 + dist++] = (uch)code; |
} |
} |
Assert (dist == 256, "tr_static_init: 256+dist != 512"); |
/* Construct the codes of the static literal tree */ |
for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; |
n = 0; |
while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; |
while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; |
while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; |
while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; |
/* Codes 286 and 287 do not exist, but we must include them in the |
* tree construction to get a canonical Huffman tree (longest code |
* all ones) |
*/ |
gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); |
/* The static distance tree is trivial: */ |
for (n = 0; n < D_CODES; n++) { |
static_dtree[n].Len = 5; |
static_dtree[n].Code = bi_reverse((unsigned)n, 5); |
} |
static_init_done = 1; |
# ifdef GEN_TREES_H |
gen_trees_header(); |
# endif |
#endif /* defined(GEN_TREES_H) || !defined(STDC) */ |
} |
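/* An illustrative helper showing how the tables built above are consulted in |
 * the rest of this file: a match length (MIN_MATCH..MAX_MATCH) maps through |
 * _length_code[] to a length code, and base_length[] plus extra_lbits[] |
 * recover the exact length again (sketch only, not part of zlib). |
 */ |
#if 0 /* illustrative only -- not compiled */ |
static int length_to_symbol(int match_len)   /* match_len in 3..258 */ |
{ |
    int code = _length_code[match_len - MIN_MATCH];   /* length code 0..28 */ |
    int extra = extra_lbits[code];                    /* extra bits to send */ |
    int residue = (match_len - MIN_MATCH) - base_length[code]; |
    (void)extra; (void)residue;   /* would be emitted after the code */ |
    return code + LITERALS + 1;   /* literal/length alphabet symbol 257..285 */ |
} |
#endif |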
/* =========================================================================== |
* Generate the file trees.h describing the static trees. |
*/ |
#ifdef GEN_TREES_H |
# ifndef DEBUG |
# include <stdio.h> |
# endif |
# define SEPARATOR(i, last, width) \ |
((i) == (last)? "\n};\n\n" : \ |
((i) % (width) == (width)-1 ? ",\n" : ", ")) |
void gen_trees_header() |
{ |
FILE *header = fopen("trees.h", "w"); |
int i; |
Assert (header != NULL, "Can't open trees.h"); |
fprintf(header, |
"/* header created automatically with -DGEN_TREES_H */\n\n"); |
fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); |
for (i = 0; i < L_CODES+2; i++) { |
fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, |
static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); |
} |
fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); |
for (i = 0; i < D_CODES; i++) { |
fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, |
static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); |
} |
fprintf(header, "const uch _dist_code[DIST_CODE_LEN] = {\n"); |
for (i = 0; i < DIST_CODE_LEN; i++) { |
fprintf(header, "%2u%s", _dist_code[i], |
SEPARATOR(i, DIST_CODE_LEN-1, 20)); |
} |
fprintf(header, "const uch _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); |
for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { |
fprintf(header, "%2u%s", _length_code[i], |
SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); |
} |
fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); |
for (i = 0; i < LENGTH_CODES; i++) { |
fprintf(header, "%1u%s", base_length[i], |
SEPARATOR(i, LENGTH_CODES-1, 20)); |
} |
fprintf(header, "local const int base_dist[D_CODES] = {\n"); |
for (i = 0; i < D_CODES; i++) { |
fprintf(header, "%5u%s", base_dist[i], |
SEPARATOR(i, D_CODES-1, 10)); |
} |
fclose(header); |
} |
#endif /* GEN_TREES_H */ |
/* =========================================================================== |
* Initialize the tree data structures for a new zlib stream. |
*/ |
void _tr_init(s) |
deflate_state *s; |
{ |
tr_static_init(); |
s->l_desc.dyn_tree = s->dyn_ltree; |
s->l_desc.stat_desc = &static_l_desc; |
s->d_desc.dyn_tree = s->dyn_dtree; |
s->d_desc.stat_desc = &static_d_desc; |
s->bl_desc.dyn_tree = s->bl_tree; |
s->bl_desc.stat_desc = &static_bl_desc; |
s->bi_buf = 0; |
s->bi_valid = 0; |
s->last_eob_len = 8; /* enough lookahead for inflate */ |
#ifdef DEBUG |
s->compressed_len = 0L; |
s->bits_sent = 0L; |
#endif |
/* Initialize the first block of the first file: */ |
init_block(s); |
} |
/* =========================================================================== |
* Initialize a new block. |
*/ |
local void init_block(s) |
deflate_state *s; |
{ |
int n; /* iterates over tree elements */ |
/* Initialize the trees. */ |
for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; |
for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; |
for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; |
s->dyn_ltree[END_BLOCK].Freq = 1; |
s->opt_len = s->static_len = 0L; |
s->last_lit = s->matches = 0; |
} |
#define SMALLEST 1 |
/* Index within the heap array of least frequent node in the Huffman tree */ |
/* =========================================================================== |
* Remove the smallest element from the heap and recreate the heap with |
* one less element. Updates heap and heap_len. |
*/ |
#define pqremove(s, tree, top) \ |
{\ |
top = s->heap[SMALLEST]; \ |
s->heap[SMALLEST] = s->heap[s->heap_len--]; \ |
pqdownheap(s, tree, SMALLEST); \ |
} |
/* =========================================================================== |
* Compares two subtrees, using the tree depth as tie breaker when |
* the subtrees have equal frequency. This minimizes the worst case length. |
*/ |
#define smaller(tree, n, m, depth) \ |
(tree[n].Freq < tree[m].Freq || \ |
(tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) |
/* =========================================================================== |
* Restore the heap property by moving down the tree starting at node k, |
* exchanging a node with the smallest of its two sons if necessary, stopping |
* when the heap property is re-established (each father smaller than its |
* two sons). |
*/ |
local void pqdownheap(s, tree, k) |
deflate_state *s; |
ct_data *tree; /* the tree to restore */ |
int k; /* node to move down */ |
{ |
int v = s->heap[k]; |
int j = k << 1; /* left son of k */ |
while (j <= s->heap_len) { |
/* Set j to the smallest of the two sons: */ |
if (j < s->heap_len && |
smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { |
j++; |
} |
/* Exit if v is smaller than both sons */ |
if (smaller(tree, v, s->heap[j], s->depth)) break; |
/* Exchange v with the smallest son */ |
s->heap[k] = s->heap[j]; k = j; |
/* And continue down the tree, setting j to the left son of k */ |
j <<= 1; |
} |
s->heap[k] = v; |
} |
/* =========================================================================== |
* Compute the optimal bit lengths for a tree and update the total bit length |
* for the current block. |
* IN assertion: the fields freq and dad are set, heap[heap_max] and |
* above are the tree nodes sorted by increasing frequency. |
* OUT assertions: the field len is set to the optimal bit length, the |
* array bl_count contains the frequencies for each bit length. |
* The length opt_len is updated; static_len is also updated if stree is |
* not null. |
*/ |
local void gen_bitlen(s, desc) |
deflate_state *s; |
tree_desc *desc; /* the tree descriptor */ |
{ |
ct_data *tree = desc->dyn_tree; |
int max_code = desc->max_code; |
const ct_data *stree = desc->stat_desc->static_tree; |
const intf *extra = desc->stat_desc->extra_bits; |
int base = desc->stat_desc->extra_base; |
int max_length = desc->stat_desc->max_length; |
int h; /* heap index */ |
int n, m; /* iterate over the tree elements */ |
int bits; /* bit length */ |
int xbits; /* extra bits */ |
ush f; /* frequency */ |
int overflow = 0; /* number of elements with bit length too large */ |
for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; |
/* In a first pass, compute the optimal bit lengths (which may |
* overflow in the case of the bit length tree). |
*/ |
tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ |
for (h = s->heap_max+1; h < HEAP_SIZE; h++) { |
n = s->heap[h]; |
bits = tree[tree[n].Dad].Len + 1; |
if (bits > max_length) bits = max_length, overflow++; |
tree[n].Len = (ush)bits; |
/* We overwrite tree[n].Dad which is no longer needed */ |
if (n > max_code) continue; /* not a leaf node */ |
s->bl_count[bits]++; |
xbits = 0; |
if (n >= base) xbits = extra[n-base]; |
f = tree[n].Freq; |
s->opt_len += (ulg)f * (bits + xbits); |
if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); |
} |
if (overflow == 0) return; |
Trace((stderr,"\nbit length overflow\n")); |
/* This happens for example on obj2 and pic of the Calgary corpus */ |
/* Find the first bit length which could increase: */ |
do { |
bits = max_length-1; |
while (s->bl_count[bits] == 0) bits--; |
s->bl_count[bits]--; /* move one leaf down the tree */ |
s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ |
s->bl_count[max_length]--; |
/* The brother of the overflow item also moves one step up, |
* but this does not affect bl_count[max_length] |
*/ |
overflow -= 2; |
} while (overflow > 0); |
/* Now recompute all bit lengths, scanning in increasing frequency. |
* h is still equal to HEAP_SIZE. (It is simpler to reconstruct all |
* lengths instead of fixing only the wrong ones. This idea is taken |
* from 'ar' written by Haruhiko Okumura.) |
*/ |
for (bits = max_length; bits != 0; bits--) { |
n = s->bl_count[bits]; |
while (n != 0) { |
m = s->heap[--h]; |
if (m > max_code) continue; |
if (tree[m].Len != (unsigned) bits) { |
Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); |
s->opt_len += ((long)bits - (long)tree[m].Len) |
*(long)tree[m].Freq; |
tree[m].Len = (ush)bits; |
} |
n--; |
} |
} |
} |
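/* Editor's note: a small worked trace (not from the zlib sources) of the |
 * repair loop above.  With max_length = 7 and overflow = 2, one pass |
 * locates the longest code length below 7 that still has leaves (say |
 * bits = 5) and updates the counters: bl_count[5]--, bl_count[6] += 2, |
 * bl_count[7]--, overflow -= 2.  Overflow is now 0, so a single pass |
 * suffices before the lengths are recomputed in increasing frequency |
 * order by the final loop of gen_bitlen(). |
 */ |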
/* =========================================================================== |
* Generate the codes for a given tree and bit counts (which need not be |
* optimal). |
* IN assertion: the array bl_count contains the bit length statistics for |
* the given tree and the field len is set for all tree elements. |
* OUT assertion: the field code is set for all tree elements of non |
* zero code length. |
*/ |
local void gen_codes (tree, max_code, bl_count) |
ct_data *tree; /* the tree to decorate */ |
int max_code; /* largest code with non zero frequency */ |
ushf *bl_count; /* number of codes at each bit length */ |
{ |
ush next_code[MAX_BITS+1]; /* next code value for each bit length */ |
ush code = 0; /* running code value */ |
int bits; /* bit index */ |
int n; /* code index */ |
/* The distribution counts are first used to generate the code values |
* without bit reversal. |
*/ |
for (bits = 1; bits <= MAX_BITS; bits++) { |
next_code[bits] = code = (code + bl_count[bits-1]) << 1; |
} |
/* Check that the bit counts in bl_count are consistent. The last code |
* must be all ones. |
*/ |
Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, |
"inconsistent bit counts"); |
Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); |
for (n = 0; n <= max_code; n++) { |
int len = tree[n].Len; |
if (len == 0) continue; |
/* Now reverse the bits */ |
tree[n].Code = bi_reverse(next_code[len]++, len); |
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", |
n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); |
} |
} |
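/* Editor's note: the compiled-out sketch below (not part of zlib) walks |
 * through the canonical-code step of gen_codes() on a toy alphabet with |
 * bit lengths {1,2,3,3}; all names and values in it are illustrative. |
 */ |
#if 0 |
#include <stdio.h> |
int main(void) |
{ |
    int bl_count[4]  = {0, 1, 1, 2};  /* number of codes per bit length */ |
    int len[4]       = {1, 2, 3, 3};  /* bit length of each symbol */ |
    int next_code[4], code = 0, bits, n; |
    for (bits = 1; bits <= 3; bits++) |
        next_code[bits] = code = (code + bl_count[bits-1]) << 1; |
    for (n = 0; n < 4; n++)           /* prints codes 0, 2, 6, 7 */ |
        printf("symbol %d: len %d code %d\n", n, len[n], next_code[len[n]]++); |
    return 0; |
} |
#endif |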
/* =========================================================================== |
* Construct one Huffman tree and assign the code bit strings and lengths. |
* Update the total bit length for the current block. |
* IN assertion: the field freq is set for all tree elements. |
* OUT assertions: the fields len and code are set to the optimal bit length |
* and corresponding code. The length opt_len is updated; static_len is |
* also updated if stree is not null. The field max_code is set. |
*/ |
local void build_tree(s, desc) |
deflate_state *s; |
tree_desc *desc; /* the tree descriptor */ |
{ |
ct_data *tree = desc->dyn_tree; |
const ct_data *stree = desc->stat_desc->static_tree; |
int elems = desc->stat_desc->elems; |
int n, m; /* iterate over heap elements */ |
int max_code = -1; /* largest code with non zero frequency */ |
int node; /* new node being created */ |
/* Construct the initial heap, with least frequent element in |
* heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. |
* heap[0] is not used. |
*/ |
s->heap_len = 0, s->heap_max = HEAP_SIZE; |
for (n = 0; n < elems; n++) { |
if (tree[n].Freq != 0) { |
s->heap[++(s->heap_len)] = max_code = n; |
s->depth[n] = 0; |
} else { |
tree[n].Len = 0; |
} |
} |
/* The pkzip format requires that at least one distance code exists, |
* and that at least one bit should be sent even if there is only one |
* possible code. So to avoid special checks later on we force at least |
* two codes of non zero frequency. |
*/ |
while (s->heap_len < 2) { |
node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); |
tree[node].Freq = 1; |
s->depth[node] = 0; |
s->opt_len--; if (stree) s->static_len -= stree[node].Len; |
/* node is 0 or 1 so it does not have extra bits */ |
} |
desc->max_code = max_code; |
/* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, |
* establish sub-heaps of increasing lengths: |
*/ |
for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); |
/* Construct the Huffman tree by repeatedly combining the least two |
* frequent nodes. |
*/ |
node = elems; /* next internal node of the tree */ |
do { |
pqremove(s, tree, n); /* n = node of least frequency */ |
m = s->heap[SMALLEST]; /* m = node of next least frequency */ |
s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ |
s->heap[--(s->heap_max)] = m; |
/* Create a new node father of n and m */ |
tree[node].Freq = tree[n].Freq + tree[m].Freq; |
s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1); |
tree[n].Dad = tree[m].Dad = (ush)node; |
#ifdef DUMP_BL_TREE |
if (tree == s->bl_tree) { |
cprintf("\nnode %d(%d), sons %d(%d) %d(%d)", |
node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); |
} |
#endif |
/* and insert the new node in the heap */ |
s->heap[SMALLEST] = node++; |
pqdownheap(s, tree, SMALLEST); |
} while (s->heap_len >= 2); |
s->heap[--(s->heap_max)] = s->heap[SMALLEST]; |
/* At this point, the fields freq and dad are set. We can now |
* generate the bit lengths. |
*/ |
gen_bitlen(s, (tree_desc *)desc); |
/* The field len is now set, we can generate the bit codes */ |
gen_codes ((ct_data *)tree, max_code, s->bl_count); |
} |
/* =========================================================================== |
* Scan a literal or distance tree to determine the frequencies of the codes |
* in the bit length tree. |
*/ |
local void scan_tree (s, tree, max_code) |
deflate_state *s; |
ct_data *tree; /* the tree to be scanned */ |
int max_code; /* and its largest code of non zero frequency */ |
{ |
int n; /* iterates over all tree elements */ |
int prevlen = -1; /* last emitted length */ |
int curlen; /* length of current code */ |
int nextlen = tree[0].Len; /* length of next code */ |
int count = 0; /* repeat count of the current code */ |
int max_count = 7; /* max repeat count */ |
int min_count = 4; /* min repeat count */ |
if (nextlen == 0) max_count = 138, min_count = 3; |
tree[max_code+1].Len = (ush)0xffff; /* guard */ |
for (n = 0; n <= max_code; n++) { |
curlen = nextlen; nextlen = tree[n+1].Len; |
if (++count < max_count && curlen == nextlen) { |
continue; |
} else if (count < min_count) { |
s->bl_tree[curlen].Freq += count; |
} else if (curlen != 0) { |
if (curlen != prevlen) s->bl_tree[curlen].Freq++; |
s->bl_tree[REP_3_6].Freq++; |
} else if (count <= 10) { |
s->bl_tree[REPZ_3_10].Freq++; |
} else { |
s->bl_tree[REPZ_11_138].Freq++; |
} |
count = 0; prevlen = curlen; |
if (nextlen == 0) { |
max_count = 138, min_count = 3; |
} else if (curlen == nextlen) { |
max_count = 6, min_count = 3; |
} else { |
max_count = 7, min_count = 4; |
} |
} |
} |
/* =========================================================================== |
* Send a literal or distance tree in compressed form, using the codes in |
* bl_tree. |
*/ |
local void send_tree (s, tree, max_code) |
deflate_state *s; |
ct_data *tree; /* the tree to be scanned */ |
int max_code; /* and its largest code of non zero frequency */ |
{ |
int n; /* iterates over all tree elements */ |
int prevlen = -1; /* last emitted length */ |
int curlen; /* length of current code */ |
int nextlen = tree[0].Len; /* length of next code */ |
int count = 0; /* repeat count of the current code */ |
int max_count = 7; /* max repeat count */ |
int min_count = 4; /* min repeat count */ |
/* tree[max_code+1].Len = -1; */ /* guard already set */ |
if (nextlen == 0) max_count = 138, min_count = 3; |
for (n = 0; n <= max_code; n++) { |
curlen = nextlen; nextlen = tree[n+1].Len; |
if (++count < max_count && curlen == nextlen) { |
continue; |
} else if (count < min_count) { |
do { send_code(s, curlen, s->bl_tree); } while (--count != 0); |
} else if (curlen != 0) { |
if (curlen != prevlen) { |
send_code(s, curlen, s->bl_tree); count--; |
} |
Assert(count >= 3 && count <= 6, " 3_6?"); |
send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); |
} else if (count <= 10) { |
send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); |
} else { |
send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); |
} |
count = 0; prevlen = curlen; |
if (nextlen == 0) { |
max_count = 138, min_count = 3; |
} else if (curlen == nextlen) { |
max_count = 6, min_count = 3; |
} else { |
max_count = 7, min_count = 4; |
} |
} |
} |
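/* Editor's note: a hedged example (not from zlib) of the run-length coding |
 * used by scan_tree()/send_tree() above.  A run of five code lengths equal |
 * to 8 following a different length is sent as the length 8 itself plus |
 * REP_3_6 with 2-bit extra value 4-3 = 1; a run of twelve zero lengths is |
 * sent as REPZ_11_138 with 7-bit extra value 12-11 = 1. |
 */ |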
/* =========================================================================== |
* Construct the Huffman tree for the bit lengths and return the index in |
* bl_order of the last bit length code to send. |
*/ |
local int build_bl_tree(s) |
deflate_state *s; |
{ |
int max_blindex; /* index of last bit length code of non zero freq */ |
/* Determine the bit length frequencies for literal and distance trees */ |
scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); |
scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); |
/* Build the bit length tree: */ |
build_tree(s, (tree_desc *)(&(s->bl_desc))); |
/* opt_len now includes the length of the tree representations, except |
* the lengths of the bit lengths codes and the 5+5+4 bits for the counts. |
*/ |
/* Determine the number of bit length codes to send. The pkzip format |
* requires that at least 4 bit length codes be sent. (appnote.txt says |
* 3 but the actual value used is 4.) |
*/ |
for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { |
if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; |
} |
/* Update opt_len to include the bit length tree and counts */ |
s->opt_len += 3*(max_blindex+1) + 5+5+4; |
Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", |
s->opt_len, s->static_len)); |
return max_blindex; |
} |
/* =========================================================================== |
* Send the header for a block using dynamic Huffman trees: the counts, the |
* lengths of the bit length codes, the literal tree and the distance tree. |
* IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. |
*/ |
local void send_all_trees(s, lcodes, dcodes, blcodes) |
deflate_state *s; |
int lcodes, dcodes, blcodes; /* number of codes for each tree */ |
{ |
int rank; /* index in bl_order */ |
Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); |
Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, |
"too many codes"); |
Tracev((stderr, "\nbl counts: ")); |
send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ |
send_bits(s, dcodes-1, 5); |
send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ |
for (rank = 0; rank < blcodes; rank++) { |
Tracev((stderr, "\nbl code %2d ", bl_order[rank])); |
send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); |
} |
Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); |
send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ |
Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); |
send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ |
Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); |
} |
/* =========================================================================== |
* Send a stored block |
*/ |
void _tr_stored_block(s, buf, stored_len, eof) |
deflate_state *s; |
charf *buf; /* input block */ |
ulg stored_len; /* length of input block */ |
int eof; /* true if this is the last block for a file */ |
{ |
send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ |
#ifdef DEBUG |
s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; |
s->compressed_len += (stored_len + 4) << 3; |
#endif |
copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ |
} |
/* =========================================================================== |
* Send one empty static block to give enough lookahead for inflate. |
* This takes 10 bits, of which 7 may remain in the bit buffer. |
* The current inflate code requires 9 bits of lookahead. If the |
* last two codes for the previous block (real code plus EOB) were coded |
* on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode |
* the last real code. In this case we send two empty static blocks instead |
* of one. (There are no problems if the previous block is stored or fixed.) |
* To simplify the code, we assume the worst case of last real code encoded |
* on one bit only. |
*/ |
void _tr_align(s) |
deflate_state *s; |
{ |
send_bits(s, STATIC_TREES<<1, 3); |
send_code(s, END_BLOCK, static_ltree); |
#ifdef DEBUG |
s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ |
#endif |
bi_flush(s); |
/* Of the 10 bits for the empty block, we have already sent |
* (10 - bi_valid) bits. The lookahead for the last real code (before |
* the EOB of the previous block) was thus at least one plus the length |
* of the EOB plus what we have just sent of the empty static block. |
*/ |
if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { |
send_bits(s, STATIC_TREES<<1, 3); |
send_code(s, END_BLOCK, static_ltree); |
#ifdef DEBUG |
s->compressed_len += 10L; |
#endif |
bi_flush(s); |
} |
s->last_eob_len = 7; |
} |
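/* Editor's note: illustrative numbers (not from zlib) for the check above. |
 * If the EOB of the previous block took 5 bits (last_eob_len = 5) and |
 * bi_valid is 8 after bi_flush(), then 1 + 5 + 10 - 8 = 8 < 9 and a second |
 * empty static block is emitted; with bi_valid = 7 the sum is 9 and the |
 * single block already guarantees enough lookahead for inflate. |
 */ |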
/* =========================================================================== |
* Determine the best encoding for the current block: dynamic trees, static |
* trees or store, and output the encoded block to the zip file. |
*/ |
void _tr_flush_block(s, buf, stored_len, eof) |
deflate_state *s; |
charf *buf; /* input block, or NULL if too old */ |
ulg stored_len; /* length of input block */ |
int eof; /* true if this is the last block for a file */ |
{ |
ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ |
int max_blindex = 0; /* index of last bit length code of non zero freq */ |
/* Build the Huffman trees unless a stored block is forced */ |
if (s->level > 0) { |
/* Check if the file is ascii or binary */ |
if (s->data_type == Z_UNKNOWN) set_data_type(s); |
/* Construct the literal and distance trees */ |
build_tree(s, (tree_desc *)(&(s->l_desc))); |
Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, |
s->static_len)); |
build_tree(s, (tree_desc *)(&(s->d_desc))); |
Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, |
s->static_len)); |
/* At this point, opt_len and static_len are the total bit lengths of |
* the compressed block data, excluding the tree representations. |
*/ |
/* Build the bit length tree for the above two trees, and get the index |
* in bl_order of the last bit length code to send. |
*/ |
max_blindex = build_bl_tree(s); |
/* Determine the best encoding. Compute first the block length in bytes*/ |
opt_lenb = (s->opt_len+3+7)>>3; |
static_lenb = (s->static_len+3+7)>>3; |
Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", |
opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, |
s->last_lit)); |
if (static_lenb <= opt_lenb) opt_lenb = static_lenb; |
} else { |
Assert(buf != (char*)0, "lost buf"); |
opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ |
} |
#ifdef FORCE_STORED |
if (buf != (char*)0) { /* force stored block */ |
#else |
if (stored_len+4 <= opt_lenb && buf != (char*)0) { |
/* 4: two words for the lengths */ |
#endif |
/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. |
* Otherwise we can't have processed more than WSIZE input bytes since |
* the last block flush, because compression would have been |
* successful. If LIT_BUFSIZE <= WSIZE, it is never too late to |
* transform a block into a stored block. |
*/ |
_tr_stored_block(s, buf, stored_len, eof); |
#ifdef FORCE_STATIC |
} else if (static_lenb >= 0) { /* force static trees */ |
#else |
} else if (static_lenb == opt_lenb) { |
#endif |
send_bits(s, (STATIC_TREES<<1)+eof, 3); |
compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); |
#ifdef DEBUG |
s->compressed_len += 3 + s->static_len; |
#endif |
} else { |
send_bits(s, (DYN_TREES<<1)+eof, 3); |
send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, |
max_blindex+1); |
compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); |
#ifdef DEBUG |
s->compressed_len += 3 + s->opt_len; |
#endif |
} |
Assert (s->compressed_len == s->bits_sent, "bad compressed size"); |
/* The above check is made mod 2^32, for files larger than 512 MB |
* and uLong implemented on 32 bits. |
*/ |
init_block(s); |
if (eof) { |
bi_windup(s); |
#ifdef DEBUG |
s->compressed_len += 7; /* align on byte boundary */ |
#endif |
} |
Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, |
s->compressed_len-7*eof)); |
} |
/* =========================================================================== |
* Save the match info and tally the frequency counts. Return true if |
* the current block must be flushed. |
*/ |
int _tr_tally (s, dist, lc) |
deflate_state *s; |
unsigned dist; /* distance of matched string */ |
unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ |
{ |
s->d_buf[s->last_lit] = (ush)dist; |
s->l_buf[s->last_lit++] = (uch)lc; |
if (dist == 0) { |
/* lc is the unmatched char */ |
s->dyn_ltree[lc].Freq++; |
} else { |
s->matches++; |
/* Here, lc is the match length - MIN_MATCH */ |
dist--; /* dist = match distance - 1 */ |
Assert((ush)dist < (ush)MAX_DIST(s) && |
(ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && |
(ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); |
s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; |
s->dyn_dtree[d_code(dist)].Freq++; |
} |
#ifdef TRUNCATE_BLOCK |
/* Try to guess if it is profitable to stop the current block here */ |
if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { |
/* Compute an upper bound for the compressed length */ |
ulg out_length = (ulg)s->last_lit*8L; |
ulg in_length = (ulg)((long)s->strstart - s->block_start); |
int dcode; |
for (dcode = 0; dcode < D_CODES; dcode++) { |
out_length += (ulg)s->dyn_dtree[dcode].Freq * |
(5L+extra_dbits[dcode]); |
} |
out_length >>= 3; |
Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", |
s->last_lit, in_length, out_length, |
100L - out_length*100L/in_length)); |
if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; |
} |
#endif |
return (s->last_lit == s->lit_bufsize-1); |
/* We avoid equality with lit_bufsize because of wraparound at 64K |
* on 16 bit machines and because stored blocks are restricted to |
* 64K-1 bytes. |
*/ |
} |
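/* Editor's note: a hedged example (not from zlib) of one tally.  For a |
 * match of length 10 at distance 300, deflate passes lc = 10 - MIN_MATCH |
 * = 7 and dist = 300: the length maps through _length_code[7] to DEFLATE |
 * length code 264 (index 264 of dyn_ltree), and dist-1 = 299 maps through |
 * d_code() to distance code 16 (distances 257..384, 7 extra bits). |
 */ |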
/* =========================================================================== |
* Send the block data compressed using the given Huffman trees |
*/ |
local void compress_block(s, ltree, dtree) |
deflate_state *s; |
ct_data *ltree; /* literal tree */ |
ct_data *dtree; /* distance tree */ |
{ |
unsigned dist; /* distance of matched string */ |
int lc; /* match length or unmatched char (if dist == 0) */ |
unsigned lx = 0; /* running index in l_buf */ |
unsigned code; /* the code to send */ |
int extra; /* number of extra bits to send */ |
if (s->last_lit != 0) do { |
dist = s->d_buf[lx]; |
lc = s->l_buf[lx++]; |
if (dist == 0) { |
send_code(s, lc, ltree); /* send a literal byte */ |
Tracecv(isgraph(lc), (stderr," '%c' ", lc)); |
} else { |
/* Here, lc is the match length - MIN_MATCH */ |
code = _length_code[lc]; |
send_code(s, code+LITERALS+1, ltree); /* send the length code */ |
extra = extra_lbits[code]; |
if (extra != 0) { |
lc -= base_length[code]; |
send_bits(s, lc, extra); /* send the extra length bits */ |
} |
dist--; /* dist is now the match distance - 1 */ |
code = d_code(dist); |
Assert (code < D_CODES, "bad d_code"); |
send_code(s, code, dtree); /* send the distance code */ |
extra = extra_dbits[code]; |
if (extra != 0) { |
dist -= base_dist[code]; |
send_bits(s, dist, extra); /* send the extra distance bits */ |
} |
} /* literal or match pair ? */ |
/* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ |
Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow"); |
} while (lx < s->last_lit); |
send_code(s, END_BLOCK, ltree); |
s->last_eob_len = ltree[END_BLOCK].Len; |
} |
/* =========================================================================== |
* Set the data type to ASCII or BINARY, using a crude approximation: |
* binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise. |
* IN assertion: the fields freq of dyn_ltree are set and the total of all |
* frequencies does not exceed 64K (to fit in an int on 16 bit machines). |
*/ |
local void set_data_type(s) |
deflate_state *s; |
{ |
int n = 0; |
unsigned ascii_freq = 0; |
unsigned bin_freq = 0; |
while (n < 7) bin_freq += s->dyn_ltree[n++].Freq; |
while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq; |
while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq; |
s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII); |
} |
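/* Editor's note: hypothetical counts (not from zlib) for the test above: |
 * with bin_freq = 30 and ascii_freq = 100, 30 > (100 >> 2) = 25, so the |
 * block (about 23% "binary" bytes) is classified as Z_BINARY. |
 */ |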
/* =========================================================================== |
* Reverse the first len bits of a code, using straightforward code (a faster |
* method would use a table) |
* IN assertion: 1 <= len <= 15 |
*/ |
local unsigned bi_reverse(code, len) |
unsigned code; /* the value to invert */ |
int len; /* its bit length */ |
{ |
register unsigned res = 0; |
do { |
res |= code & 1; |
code >>= 1, res <<= 1; |
} while (--len > 0); |
return res >> 1; |
} |
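/* Editor's note: a compiled-out, self-contained sketch (not part of zlib) |
 * tracing the reversal loop above for code 0x3 over 5 bits. |
 */ |
#if 0 |
#include <stdio.h> |
int main(void) |
{ |
    unsigned code = 0x3, res = 0;   /* 00011, to be reversed over 5 bits */ |
    int len = 5; |
    do { |
        res |= code & 1; |
        code >>= 1, res <<= 1; |
    } while (--len > 0); |
    printf("0x%x\n", res >> 1);     /* prints 0x18, i.e. 11000 */ |
    return 0; |
} |
#endif |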
/* =========================================================================== |
* Flush the bit buffer, keeping at most 7 bits in it. |
*/ |
local void bi_flush(s) |
deflate_state *s; |
{ |
if (s->bi_valid == 16) { |
put_short(s, s->bi_buf); |
s->bi_buf = 0; |
s->bi_valid = 0; |
} else if (s->bi_valid >= 8) { |
put_byte(s, (Byte)s->bi_buf); |
s->bi_buf >>= 8; |
s->bi_valid -= 8; |
} |
} |
/* =========================================================================== |
* Flush the bit buffer and align the output on a byte boundary |
*/ |
local void bi_windup(s) |
deflate_state *s; |
{ |
if (s->bi_valid > 8) { |
put_short(s, s->bi_buf); |
} else if (s->bi_valid > 0) { |
put_byte(s, (Byte)s->bi_buf); |
} |
s->bi_buf = 0; |
s->bi_valid = 0; |
#ifdef DEBUG |
s->bits_sent = (s->bits_sent+7) & ~7; |
#endif |
} |
/* =========================================================================== |
* Copy a stored block, storing first the length and its |
* one's complement if requested. |
*/ |
local void copy_block(s, buf, len, header) |
deflate_state *s; |
charf *buf; /* the input data */ |
unsigned len; /* its length */ |
int header; /* true if block header must be written */ |
{ |
bi_windup(s); /* align on byte boundary */ |
s->last_eob_len = 8; /* enough lookahead for inflate */ |
if (header) { |
put_short(s, (ush)len); |
put_short(s, (ush)~len); |
#ifdef DEBUG |
s->bits_sent += 2*16; |
#endif |
} |
#ifdef DEBUG |
s->bits_sent += (ulg)len<<3; |
#endif |
while (len--) { |
put_byte(s, *buf++); |
} |
} |
/shark/trunk/ports/png/pngmem.c |
---|
0,0 → 1,566 |
/* pngmem.c - stub functions for memory allocation |
* |
* libpng 1.2.5 - October 3, 2002 |
* For conditions of distribution and use, see copyright notice in png.h |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) |
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) |
* |
* This file provides a location for all memory allocation. Users who |
* need special memory handling are expected to supply replacement |
* functions for png_malloc() and png_free(), and to use |
* png_create_read_struct_2() and png_create_write_struct_2() to |
* identify the replacement functions. |
*/ |
#define PNG_INTERNAL |
#include "png.h" |
/* Borland DOS special memory handler */ |
#if defined(__TURBOC__) && !defined(_Windows) && !defined(__FLAT__) |
/* if you change this, be sure to change the one in png.h also */ |
/* Allocate memory for a png_struct. The malloc and memset can be replaced |
by a single call to calloc() if this is thought to improve performance. */ |
png_voidp /* PRIVATE */ |
png_create_struct(int type) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
return (png_create_struct_2(type, png_malloc_ptr_NULL, png_voidp_NULL)); |
} |
/* Alternate version of png_create_struct, for use with user-defined malloc. */ |
png_voidp /* PRIVATE */ |
png_create_struct_2(int type, png_malloc_ptr malloc_fn, png_voidp mem_ptr) |
{ |
#endif /* PNG_USER_MEM_SUPPORTED */ |
png_size_t size; |
png_voidp struct_ptr; |
if (type == PNG_STRUCT_INFO) |
size = sizeof(png_info); |
else if (type == PNG_STRUCT_PNG) |
size = sizeof(png_struct); |
else |
return (NULL); |
#ifdef PNG_USER_MEM_SUPPORTED |
if(malloc_fn != NULL) |
{ |
png_struct dummy_struct; |
png_structp png_ptr = &dummy_struct; |
png_ptr->mem_ptr=mem_ptr; |
struct_ptr = (*(malloc_fn))(png_ptr, (png_uint_32)size); |
} |
else |
#endif /* PNG_USER_MEM_SUPPORTED */ |
struct_ptr = (png_voidp)farmalloc(size); |
if (struct_ptr != NULL) |
png_memset(struct_ptr, 0, size); |
return (struct_ptr); |
} |
/* Free memory allocated by a png_create_struct() call */ |
void /* PRIVATE */ |
png_destroy_struct(png_voidp struct_ptr) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2(struct_ptr, png_free_ptr_NULL, png_voidp_NULL); |
} |
/* Free memory allocated by a png_create_struct() call */ |
void /* PRIVATE */ |
png_destroy_struct_2(png_voidp struct_ptr, png_free_ptr free_fn, |
png_voidp mem_ptr) |
{ |
#endif |
if (struct_ptr != NULL) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
if(free_fn != NULL) |
{ |
png_struct dummy_struct; |
png_structp png_ptr = &dummy_struct; |
png_ptr->mem_ptr=mem_ptr; |
(*(free_fn))(png_ptr, struct_ptr); |
return; |
} |
#endif /* PNG_USER_MEM_SUPPORTED */ |
farfree (struct_ptr); |
} |
} |
/* Allocate memory. For reasonable files, size should never exceed |
* 64K. However, zlib may allocate more than 64K if you don't tell |
* it not to. See zconf.h and png.h for more information. zlib does |
* need to allocate exactly 64K, so whatever you call here must |
* have the ability to do that. |
* |
* Borland seems to have a problem in DOS mode for exactly 64K. |
* It gives you a segment with an offset of 8 (perhaps to store its |
* memory stuff). zlib doesn't like this at all, so we have to |
* detect and deal with it. This code should not be needed in |
* Windows or OS/2 modes, and only in 16 bit mode. This code has |
* been updated by Alexander Lehmann for version 0.89 to waste less |
* memory. |
* |
* Note that we can't use png_size_t for the "size" declaration, |
* since on some systems a png_size_t is a 16-bit quantity, and as a |
* result, we would be truncating potentially larger memory requests |
* (which should cause a fatal error) and introducing major problems. |
*/ |
png_voidp PNGAPI |
png_malloc(png_structp png_ptr, png_uint_32 size) |
{ |
png_voidp ret; |
if (png_ptr == NULL || size == 0) |
return (NULL); |
#ifdef PNG_USER_MEM_SUPPORTED |
if(png_ptr->malloc_fn != NULL) |
{ |
ret = ((png_voidp)(*(png_ptr->malloc_fn))(png_ptr, (png_size_t)size)); |
if (ret == NULL && (png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out of memory!"); |
return (ret); |
} |
else |
return png_malloc_default(png_ptr, size); |
} |
png_voidp PNGAPI |
png_malloc_default(png_structp png_ptr, png_uint_32 size) |
{ |
png_voidp ret; |
#endif /* PNG_USER_MEM_SUPPORTED */ |
#ifdef PNG_MAX_MALLOC_64K |
if (size > (png_uint_32)65536L) |
png_error(png_ptr, "Cannot Allocate > 64K"); |
#endif |
if (size == (png_uint_32)65536L) |
{ |
if (png_ptr->offset_table == NULL) |
{ |
/* try to see if we need to do any of this fancy stuff */ |
ret = farmalloc(size); |
if (ret == NULL || ((png_size_t)ret & 0xffff)) |
{ |
int num_blocks; |
png_uint_32 total_size; |
png_bytep table; |
int i; |
png_byte huge * hptr; |
if (ret != NULL) |
{ |
farfree(ret); |
ret = NULL; |
} |
if(png_ptr->zlib_window_bits > 14) |
num_blocks = (int)(1 << (png_ptr->zlib_window_bits - 14)); |
else |
num_blocks = 1; |
if (png_ptr->zlib_mem_level >= 7) |
num_blocks += (int)(1 << (png_ptr->zlib_mem_level - 7)); |
else |
num_blocks++; |
total_size = ((png_uint_32)65536L) * (png_uint_32)num_blocks+16; |
table = farmalloc(total_size); |
if (table == NULL) |
{ |
if ((png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out Of Memory."); /* Note "O" and "M" */ |
else |
png_warning(png_ptr, "Out Of Memory."); |
return (NULL); |
} |
if ((png_size_t)table & 0xfff0) |
{ |
if ((png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, |
"Farmalloc didn't return normalized pointer"); |
else |
png_warning(png_ptr, |
"Farmalloc didn't return normalized pointer"); |
return (NULL); |
} |
png_ptr->offset_table = table; |
png_ptr->offset_table_ptr = farmalloc(num_blocks * |
sizeof (png_bytep)); |
if (png_ptr->offset_table_ptr == NULL) |
{ |
if ((png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out Of memory."); /* Note "O" and "M" */ |
else |
png_warning(png_ptr, "Out Of memory."); |
return (NULL); |
} |
hptr = (png_byte huge *)table; |
if ((png_size_t)hptr & 0xf) |
{ |
hptr = (png_byte huge *)((long)(hptr) & 0xfffffff0L); |
hptr = hptr + 16L; /* "hptr += 16L" fails on Turbo C++ 3.0 */ |
} |
for (i = 0; i < num_blocks; i++) |
{ |
png_ptr->offset_table_ptr[i] = (png_bytep)hptr; |
hptr = hptr + (png_uint_32)65536L; /* "+=" fails on TC++3.0 */ |
} |
png_ptr->offset_table_number = num_blocks; |
png_ptr->offset_table_count = 0; |
png_ptr->offset_table_count_free = 0; |
} |
} |
if (png_ptr->offset_table_count >= png_ptr->offset_table_number) |
{ |
if ((png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out of Memory."); /* Note "o" and "M" */ |
else |
png_warning(png_ptr, "Out of Memory."); |
return (NULL); |
} |
ret = png_ptr->offset_table_ptr[png_ptr->offset_table_count++]; |
} |
else |
ret = farmalloc(size); |
if (ret == NULL) |
{ |
if ((png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out of memory."); /* Note "o" and "m" */ |
else |
png_warning(png_ptr, "Out of memory."); /* Note "o" and "m" */ |
} |
return (ret); |
} |
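/* Editor's note: a worked sizing example (not from libpng) for the offset |
 * table built above.  With zlib_window_bits = 15 and zlib_mem_level = 8, |
 * num_blocks = (1 << (15-14)) + (1 << (8-7)) = 2 + 2 = 4, and total_size = |
 * 4 * 65536 + 16 = 262160 bytes, the extra 16 allowing the table pointer |
 * to be re-aligned to a paragraph boundary. |
 */ |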
/* free a pointer allocated by png_malloc(). In the default |
configuration, png_ptr is not used, but is passed in case it |
is needed. If ptr is NULL, return without taking any action. */ |
void PNGAPI |
png_free(png_structp png_ptr, png_voidp ptr) |
{ |
if (png_ptr == NULL || ptr == NULL) |
return; |
#ifdef PNG_USER_MEM_SUPPORTED |
if (png_ptr->free_fn != NULL) |
{ |
(*(png_ptr->free_fn))(png_ptr, ptr); |
return; |
} |
else png_free_default(png_ptr, ptr); |
} |
void PNGAPI |
png_free_default(png_structp png_ptr, png_voidp ptr) |
{ |
#endif /* PNG_USER_MEM_SUPPORTED */ |
if (png_ptr->offset_table != NULL) |
{ |
int i; |
for (i = 0; i < png_ptr->offset_table_count; i++) |
{ |
if (ptr == png_ptr->offset_table_ptr[i]) |
{ |
ptr = NULL; |
png_ptr->offset_table_count_free++; |
break; |
} |
} |
if (png_ptr->offset_table_count_free == png_ptr->offset_table_count) |
{ |
farfree(png_ptr->offset_table); |
farfree(png_ptr->offset_table_ptr); |
png_ptr->offset_table = NULL; |
png_ptr->offset_table_ptr = NULL; |
} |
} |
if (ptr != NULL) |
{ |
farfree(ptr); |
} |
} |
#else /* Not the Borland DOS special memory handler */ |
/* Allocate memory for a png_struct or a png_info. The malloc and |
memset can be replaced by a single call to calloc() if this is thought |
to improve performance noticeably. */ |
png_voidp /* PRIVATE */ |
png_create_struct(int type) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
return (png_create_struct_2(type, png_malloc_ptr_NULL, png_voidp_NULL)); |
} |
/* Allocate memory for a png_struct or a png_info. The malloc and |
memset can be replaced by a single call to calloc() if this is thought |
to improve performance noticeably. */ |
png_voidp /* PRIVATE */ |
png_create_struct_2(int type, png_malloc_ptr malloc_fn, png_voidp mem_ptr) |
{ |
#endif /* PNG_USER_MEM_SUPPORTED */ |
png_size_t size; |
png_voidp struct_ptr; |
if (type == PNG_STRUCT_INFO) |
size = sizeof(png_info); |
else if (type == PNG_STRUCT_PNG) |
size = sizeof(png_struct); |
else |
return (NULL); |
#ifdef PNG_USER_MEM_SUPPORTED |
if(malloc_fn != NULL) |
{ |
png_struct dummy_struct; |
png_structp png_ptr = &dummy_struct; |
png_ptr->mem_ptr=mem_ptr; |
struct_ptr = (*(malloc_fn))(png_ptr, size); |
if (struct_ptr != NULL) |
png_memset(struct_ptr, 0, size); |
return (struct_ptr); |
} |
#endif /* PNG_USER_MEM_SUPPORTED */ |
#if defined(__TURBOC__) && !defined(__FLAT__) |
if ((struct_ptr = (png_voidp)farmalloc(size)) != NULL) |
#else |
# if defined(_MSC_VER) && defined(MAXSEG_64K) |
if ((struct_ptr = (png_voidp)halloc(size,1)) != NULL) |
# else |
if ((struct_ptr = (png_voidp)malloc(size)) != NULL) |
# endif |
#endif |
{ |
png_memset(struct_ptr, 0, size); |
} |
return (struct_ptr); |
} |
/* Free memory allocated by a png_create_struct() call */ |
void /* PRIVATE */ |
png_destroy_struct(png_voidp struct_ptr) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2(struct_ptr, png_free_ptr_NULL, png_voidp_NULL); |
} |
/* Free memory allocated by a png_create_struct() call */ |
void /* PRIVATE */ |
png_destroy_struct_2(png_voidp struct_ptr, png_free_ptr free_fn, |
png_voidp mem_ptr) |
{ |
#endif /* PNG_USER_MEM_SUPPORTED */ |
if (struct_ptr != NULL) |
{ |
#ifdef PNG_USER_MEM_SUPPORTED |
if(free_fn != NULL) |
{ |
png_struct dummy_struct; |
png_structp png_ptr = &dummy_struct; |
png_ptr->mem_ptr=mem_ptr; |
(*(free_fn))(png_ptr, struct_ptr); |
return; |
} |
#endif /* PNG_USER_MEM_SUPPORTED */ |
#if defined(__TURBOC__) && !defined(__FLAT__) |
farfree(struct_ptr); |
#else |
# if defined(_MSC_VER) && defined(MAXSEG_64K) |
hfree(struct_ptr); |
# else |
free(struct_ptr); |
# endif |
#endif |
} |
} |
/* Allocate memory. For reasonable files, size should never exceed |
64K. However, zlib may allocate more than 64K if you don't tell |
it not to. See zconf.h and png.h for more information. zlib does |
need to allocate exactly 64K, so whatever you call here must |
have the ability to do that. */ |
png_voidp PNGAPI |
png_malloc(png_structp png_ptr, png_uint_32 size) |
{ |
png_voidp ret; |
if (png_ptr == NULL || size == 0) |
return (NULL); |
#ifdef PNG_USER_MEM_SUPPORTED |
if(png_ptr->malloc_fn != NULL) |
{ |
ret = ((png_voidp)(*(png_ptr->malloc_fn))(png_ptr, (png_size_t)size)); |
if (ret == NULL && (png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out of Memory!"); |
return (ret); |
} |
else |
return (png_malloc_default(png_ptr, size)); |
} |
png_voidp PNGAPI |
png_malloc_default(png_structp png_ptr, png_uint_32 size) |
{ |
png_voidp ret; |
#endif /* PNG_USER_MEM_SUPPORTED */ |
#ifdef PNG_MAX_MALLOC_64K |
if (size > (png_uint_32)65536L) |
{ |
if((png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Cannot Allocate > 64K"); |
else |
return NULL; |
} |
#endif |
#if defined(__TURBOC__) && !defined(__FLAT__) |
ret = farmalloc(size); |
#else |
# if defined(_MSC_VER) && defined(MAXSEG_64K) |
ret = halloc(size, 1); |
# else |
ret = malloc((size_t)size); |
# endif |
#endif |
if (ret == NULL && (png_ptr->flags&PNG_FLAG_MALLOC_NULL_MEM_OK) == 0) |
png_error(png_ptr, "Out of Memory"); |
return (ret); |
} |
/* Free a pointer allocated by png_malloc(). If ptr is NULL, return |
without taking any action. */ |
void PNGAPI |
png_free(png_structp png_ptr, png_voidp ptr) |
{ |
if (png_ptr == NULL || ptr == NULL) |
return; |
#ifdef PNG_USER_MEM_SUPPORTED |
if (png_ptr->free_fn != NULL) |
{ |
(*(png_ptr->free_fn))(png_ptr, ptr); |
return; |
} |
else png_free_default(png_ptr, ptr); |
} |
void PNGAPI |
png_free_default(png_structp png_ptr, png_voidp ptr) |
{ |
if (png_ptr == NULL || ptr == NULL) |
return; |
#endif /* PNG_USER_MEM_SUPPORTED */ |
#if defined(__TURBOC__) && !defined(__FLAT__) |
farfree(ptr); |
#else |
# if defined(_MSC_VER) && defined(MAXSEG_64K) |
hfree(ptr); |
# else |
free(ptr); |
# endif |
#endif |
} |
#endif /* Not Borland DOS special memory handler */ |
#if defined(PNG_1_0_X) |
# define png_malloc_warn png_malloc |
#else |
/* This function was added at libpng version 1.2.3. The png_malloc_warn() |
* function will issue a png_warning and return NULL instead of issuing a |
* png_error, if it fails to allocate the requested memory. |
*/ |
png_voidp PNGAPI |
png_malloc_warn(png_structp png_ptr, png_uint_32 size) |
{ |
png_voidp ptr; |
png_uint_32 save_flags=png_ptr->flags; |
png_ptr->flags|=PNG_FLAG_MALLOC_NULL_MEM_OK; |
ptr = (png_voidp)png_malloc((png_structp)png_ptr, size); |
png_ptr->flags=save_flags; |
return(ptr); |
} |
#endif |
png_voidp PNGAPI |
png_memcpy_check (png_structp png_ptr, png_voidp s1, png_voidp s2, |
png_uint_32 length) |
{ |
png_size_t size; |
size = (png_size_t)length; |
if ((png_uint_32)size != length) |
png_error(png_ptr,"Overflow in png_memcpy_check."); |
return(png_memcpy (s1, s2, size)); |
} |
png_voidp PNGAPI |
png_memset_check (png_structp png_ptr, png_voidp s1, int value, |
png_uint_32 length) |
{ |
png_size_t size; |
size = (png_size_t)length; |
if ((png_uint_32)size != length) |
png_error(png_ptr,"Overflow in png_memset_check."); |
return (png_memset (s1, value, size)); |
} |
#ifdef PNG_USER_MEM_SUPPORTED |
/* This function is called when the application wants to use another method |
* of allocating and freeing memory. |
*/ |
void PNGAPI |
png_set_mem_fn(png_structp png_ptr, png_voidp mem_ptr, png_malloc_ptr |
malloc_fn, png_free_ptr free_fn) |
{ |
png_ptr->mem_ptr = mem_ptr; |
png_ptr->malloc_fn = malloc_fn; |
png_ptr->free_fn = free_fn; |
} |
/* This function returns a pointer to the mem_ptr associated with the user |
* functions. The application should free any memory associated with this |
* pointer before png_write_destroy and png_read_destroy are called. |
*/ |
png_voidp PNGAPI |
png_get_mem_ptr(png_structp png_ptr) |
{ |
return ((png_voidp)png_ptr->mem_ptr); |
} |
#endif /* PNG_USER_MEM_SUPPORTED */ |
/shark/trunk/ports/png/zconf.h |
---|
0,0 → 1,279 |
/* zconf.h -- configuration of the zlib compression library |
* Copyright (C) 1995-2002 Jean-loup Gailly. |
* For conditions of distribution and use, see copyright notice in zlib.h |
*/ |
/* @(#) $Id: zconf.h,v 1.1 2003-03-20 13:08:13 giacomo Exp $ */ |
#ifndef _ZCONF_H |
#define _ZCONF_H |
/* |
* If you *really* need a unique prefix for all types and library functions, |
* compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. |
*/ |
#ifdef Z_PREFIX |
# define deflateInit_ z_deflateInit_ |
# define deflate z_deflate |
# define deflateEnd z_deflateEnd |
# define inflateInit_ z_inflateInit_ |
# define inflate z_inflate |
# define inflateEnd z_inflateEnd |
# define deflateInit2_ z_deflateInit2_ |
# define deflateSetDictionary z_deflateSetDictionary |
# define deflateCopy z_deflateCopy |
# define deflateReset z_deflateReset |
# define deflateParams z_deflateParams |
# define inflateInit2_ z_inflateInit2_ |
# define inflateSetDictionary z_inflateSetDictionary |
# define inflateSync z_inflateSync |
# define inflateSyncPoint z_inflateSyncPoint |
# define inflateReset z_inflateReset |
# define compress z_compress |
# define compress2 z_compress2 |
# define uncompress z_uncompress |
# define adler32 z_adler32 |
# define crc32 z_crc32 |
# define get_crc_table z_get_crc_table |
# define Byte z_Byte |
# define uInt z_uInt |
# define uLong z_uLong |
# define Bytef z_Bytef |
# define charf z_charf |
# define intf z_intf |
# define uIntf z_uIntf |
# define uLongf z_uLongf |
# define voidpf z_voidpf |
# define voidp z_voidp |
#endif |
#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) |
# define WIN32 |
#endif |
#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386) |
# ifndef __32BIT__ |
# define __32BIT__ |
# endif |
#endif |
#if defined(__MSDOS__) && !defined(MSDOS) |
# define MSDOS |
#endif |
/* |
* Compile with -DMAXSEG_64K if the alloc function cannot allocate more |
* than 64k bytes at a time (needed on systems with 16-bit int). |
*/ |
#if defined(MSDOS) && !defined(__32BIT__) |
# define MAXSEG_64K |
#endif |
#ifdef MSDOS |
# define UNALIGNED_OK |
#endif |
#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC) |
# define STDC |
#endif |
#if defined(__STDC__) || defined(__cplusplus) || defined(__OS2__) |
# ifndef STDC |
# define STDC |
# endif |
#endif |
#ifndef STDC |
# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ |
# define const |
# endif |
#endif |
/* Some Mac compilers merge all .h files incorrectly: */ |
#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__) |
# define NO_DUMMY_DECL |
#endif |
/* Old Borland C incorrectly complains about missing returns: */ |
#if defined(__BORLANDC__) && (__BORLANDC__ < 0x500) |
# define NEED_DUMMY_RETURN |
#endif |
/* Maximum value for memLevel in deflateInit2 */ |
#ifndef MAX_MEM_LEVEL |
# ifdef MAXSEG_64K |
# define MAX_MEM_LEVEL 8 |
# else |
# define MAX_MEM_LEVEL 9 |
# endif |
#endif |
/* Maximum value for windowBits in deflateInit2 and inflateInit2. |
* WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files |
* created by gzip. (Files created by minigzip can still be extracted by |
* gzip.) |
*/ |
#ifndef MAX_WBITS |
# define MAX_WBITS 15 /* 32K LZ77 window */ |
#endif |
/* The memory requirements for deflate are (in bytes): |
(1 << (windowBits+2)) + (1 << (memLevel+9)) |
that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) |
plus a few kilobytes for small objects. For example, if you want to reduce |
the default memory requirements from 256K to 128K, compile with |
make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" |
Of course this will generally degrade compression (there's no free lunch). |
The memory requirements for inflate are (in bytes) 1 << windowBits |
that is, 32K for windowBits=15 (default value) plus a few kilobytes |
for small objects. |
*/ |
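/* Editor's note: the arithmetic above, worked for the reduced settings it |
 * mentions (values per the formula, not independently measured): with |
 * windowBits = 14 and memLevel = 7, deflate needs (1 << 16) + (1 << 16) = |
 * 64K + 64K = 128K, and inflate needs 1 << 14 = 16K plus small objects. |
 */ |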
/* Type declarations */ |
#ifndef OF /* function prototypes */ |
# ifdef STDC |
# define OF(args) args |
# else |
# define OF(args) () |
# endif |
#endif |
/* The following definitions for FAR are needed only for MSDOS mixed |
* model programming (small or medium model with some far allocations). |
* This was tested only with MSC; for other MSDOS compilers you may have |
* to define NO_MEMCPY in zutil.h. If you don't need the mixed model, |
* just define FAR to be empty. |
*/ |
#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__) |
/* MSC small or medium model */ |
# define SMALL_MEDIUM |
# ifdef _MSC_VER |
# define FAR _far |
# else |
# define FAR far |
# endif |
#endif |
#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__)) |
# ifndef __32BIT__ |
# define SMALL_MEDIUM |
# define FAR _far |
# endif |
#endif |
/* Compile with -DZLIB_DLL for Windows DLL support */ |
#if defined(ZLIB_DLL) |
# if defined(_WINDOWS) || defined(WINDOWS) |
# ifdef FAR |
# undef FAR |
# endif |
# include <windows.h> |
# define ZEXPORT WINAPI |
# ifdef WIN32 |
# define ZEXPORTVA WINAPIV |
# else |
# define ZEXPORTVA FAR _cdecl _export |
# endif |
# endif |
# if defined (__BORLANDC__) |
# if (__BORLANDC__ >= 0x0500) && defined (WIN32) |
# include <windows.h> |
# define ZEXPORT __declspec(dllexport) WINAPI |
# define ZEXPORTRVA __declspec(dllexport) WINAPIV |
# else |
# if defined (_Windows) && defined (__DLL__) |
# define ZEXPORT _export |
# define ZEXPORTVA _export |
# endif |
# endif |
# endif |
#endif |
#if defined (__BEOS__) |
# if defined (ZLIB_DLL) |
# define ZEXTERN extern __declspec(dllexport) |
# else |
# define ZEXTERN extern __declspec(dllimport) |
# endif |
#endif |
#ifndef ZEXPORT |
# define ZEXPORT |
#endif |
#ifndef ZEXPORTVA |
# define ZEXPORTVA |
#endif |
#ifndef ZEXTERN |
# define ZEXTERN extern |
#endif |
#ifndef FAR |
# define FAR |
#endif |
#if !defined(MACOS) && !defined(TARGET_OS_MAC) |
typedef unsigned char Byte; /* 8 bits */ |
#endif |
typedef unsigned int uInt; /* 16 bits or more */ |
typedef unsigned long uLong; /* 32 bits or more */ |
#ifdef SMALL_MEDIUM |
/* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ |
# define Bytef Byte FAR |
#else |
typedef Byte FAR Bytef; |
#endif |
typedef char FAR charf; |
typedef int FAR intf; |
typedef uInt FAR uIntf; |
typedef uLong FAR uLongf; |
#ifdef STDC |
typedef void FAR *voidpf; |
typedef void *voidp; |
#else |
typedef Byte FAR *voidpf; |
typedef Byte *voidp; |
#endif |
#ifdef HAVE_UNISTD_H |
# include <sys/types.h> /* for off_t */ |
# include <unistd.h> /* for SEEK_* and off_t */ |
# define z_off_t off_t |
#endif |
#ifndef SEEK_SET |
# define SEEK_SET 0 /* Seek from beginning of file. */ |
# define SEEK_CUR 1 /* Seek from current position. */ |
# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ |
#endif |
#ifndef z_off_t |
# define z_off_t long |
#endif |
/* MVS linker does not support external names larger than 8 bytes */ |
#if defined(__MVS__) |
# pragma map(deflateInit_,"DEIN") |
# pragma map(deflateInit2_,"DEIN2") |
# pragma map(deflateEnd,"DEEND") |
# pragma map(inflateInit_,"ININ") |
# pragma map(inflateInit2_,"ININ2") |
# pragma map(inflateEnd,"INEND") |
# pragma map(inflateSync,"INSY") |
# pragma map(inflateSetDictionary,"INSEDI") |
# pragma map(inflate_blocks,"INBL") |
# pragma map(inflate_blocks_new,"INBLNE") |
# pragma map(inflate_blocks_free,"INBLFR") |
# pragma map(inflate_blocks_reset,"INBLRE") |
# pragma map(inflate_codes_free,"INCOFR") |
# pragma map(inflate_codes,"INCO") |
# pragma map(inflate_fast,"INFA") |
# pragma map(inflate_flush,"INFLU") |
# pragma map(inflate_mask,"INMA") |
# pragma map(inflate_set_dictionary,"INSEDI2") |
# pragma map(inflate_copyright,"INCOPY") |
# pragma map(inflate_trees_bits,"INTRBI") |
# pragma map(inflate_trees_dynamic,"INTRDY") |
# pragma map(inflate_trees_fixed,"INTRFI") |
# pragma map(inflate_trees_free,"INTRFR") |
#endif |
#endif /* _ZCONF_H */ |
/shark/trunk/ports/png/pngget.c |
---|
0,0 → 1,927 |
/* pngget.c - retrieval of values from info struct |
* |
* libpng 1.2.5 - October 3, 2002 |
* For conditions of distribution and use, see copyright notice in png.h |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) |
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) |
*/ |
#define PNG_INTERNAL |
#include "png.h" |
png_uint_32 PNGAPI |
png_get_valid(png_structp png_ptr, png_infop info_ptr, png_uint_32 flag) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
return(info_ptr->valid & flag); |
else |
return(0); |
} |
png_uint_32 PNGAPI |
png_get_rowbytes(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
return(info_ptr->rowbytes); |
else |
return(0); |
} |
#if defined(PNG_INFO_IMAGE_SUPPORTED) |
png_bytepp PNGAPI |
png_get_rows(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
return(info_ptr->row_pointers); |
else |
return(0); |
} |
#endif |
#ifdef PNG_EASY_ACCESS_SUPPORTED |
/* easy access to info, added in libpng-0.99 */ |
png_uint_32 PNGAPI |
png_get_image_width(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->width; |
} |
return (0); |
} |
png_uint_32 PNGAPI |
png_get_image_height(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->height; |
} |
return (0); |
} |
png_byte PNGAPI |
png_get_bit_depth(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->bit_depth; |
} |
return (0); |
} |
png_byte PNGAPI |
png_get_color_type(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->color_type; |
} |
return (0); |
} |
png_byte PNGAPI |
png_get_filter_type(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->filter_type; |
} |
return (0); |
} |
png_byte PNGAPI |
png_get_interlace_type(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->interlace_type; |
} |
return (0); |
} |
png_byte PNGAPI |
png_get_compression_type(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
{ |
return info_ptr->compression_type; |
} |
return (0); |
} |
png_uint_32 PNGAPI |
png_get_x_pixels_per_meter(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_pHYs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_pHYs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_x_pixels_per_meter"); |
if(info_ptr->phys_unit_type != PNG_RESOLUTION_METER) |
return (0); |
else return (info_ptr->x_pixels_per_unit); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
png_uint_32 PNGAPI |
png_get_y_pixels_per_meter(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_pHYs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_pHYs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_y_pixels_per_meter"); |
if(info_ptr->phys_unit_type != PNG_RESOLUTION_METER) |
return (0); |
else return (info_ptr->y_pixels_per_unit); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
png_uint_32 PNGAPI |
png_get_pixels_per_meter(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_pHYs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_pHYs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_pixels_per_meter"); |
if(info_ptr->phys_unit_type != PNG_RESOLUTION_METER || |
info_ptr->x_pixels_per_unit != info_ptr->y_pixels_per_unit) |
return (0); |
else return (info_ptr->x_pixels_per_unit); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
#ifdef PNG_FLOATING_POINT_SUPPORTED |
float PNGAPI |
png_get_pixel_aspect_ratio(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_pHYs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_pHYs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_aspect_ratio"); |
if (info_ptr->x_pixels_per_unit == 0) |
return ((float)0.0); |
else |
return ((float)((float)info_ptr->y_pixels_per_unit |
/(float)info_ptr->x_pixels_per_unit)); |
} |
#else |
return (0.0); |
#endif |
return ((float)0.0); |
} |
#endif |
png_int_32 PNGAPI |
png_get_x_offset_microns(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_oFFs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_oFFs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_x_offset_microns"); |
if(info_ptr->offset_unit_type != PNG_OFFSET_MICROMETER) |
return (0); |
else return (info_ptr->x_offset); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
png_int_32 PNGAPI |
png_get_y_offset_microns(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_oFFs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_oFFs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_y_offset_microns"); |
if(info_ptr->offset_unit_type != PNG_OFFSET_MICROMETER) |
return (0); |
else return (info_ptr->y_offset); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
png_int_32 PNGAPI |
png_get_x_offset_pixels(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_oFFs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_oFFs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_x_offset_microns"); |
if(info_ptr->offset_unit_type != PNG_OFFSET_PIXEL) |
return (0); |
else return (info_ptr->x_offset); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
png_int_32 PNGAPI |
png_get_y_offset_pixels(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
#if defined(PNG_oFFs_SUPPORTED) |
if (info_ptr->valid & PNG_INFO_oFFs) |
{ |
png_debug1(1, "in %s retrieval function\n", "png_get_y_offset_microns"); |
if(info_ptr->offset_unit_type != PNG_OFFSET_PIXEL) |
return (0); |
else return (info_ptr->y_offset); |
} |
#else |
return (0); |
#endif |
return (0); |
} |
#if defined(PNG_INCH_CONVERSIONS) && defined(PNG_FLOATING_POINT_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_pixels_per_inch(png_structp png_ptr, png_infop info_ptr) |
{ |
return ((png_uint_32)((float)png_get_pixels_per_meter(png_ptr, info_ptr) |
*.0254 +.5)); |
} |
png_uint_32 PNGAPI |
png_get_x_pixels_per_inch(png_structp png_ptr, png_infop info_ptr) |
{ |
return ((png_uint_32)((float)png_get_x_pixels_per_meter(png_ptr, info_ptr) |
*.0254 +.5)); |
} |
png_uint_32 PNGAPI |
png_get_y_pixels_per_inch(png_structp png_ptr, png_infop info_ptr) |
{ |
return ((png_uint_32)((float)png_get_y_pixels_per_meter(png_ptr, info_ptr) |
*.0254 +.5)); |
} |
float PNGAPI |
png_get_x_offset_inches(png_structp png_ptr, png_infop info_ptr) |
{ |
return ((float)png_get_x_offset_microns(png_ptr, info_ptr) |
*.00003937); |
} |
float PNGAPI |
png_get_y_offset_inches(png_structp png_ptr, png_infop info_ptr) |
{ |
return ((float)png_get_y_offset_microns(png_ptr, info_ptr) |
*.00003937); |
} |
#if defined(PNG_pHYs_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_pHYs_dpi(png_structp png_ptr, png_infop info_ptr, |
png_uint_32 *res_x, png_uint_32 *res_y, int *unit_type) |
{ |
png_uint_32 retval = 0; |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_pHYs)) |
{ |
png_debug1(1, "in %s retrieval function\n", "pHYs"); |
if (res_x != NULL) |
{ |
*res_x = info_ptr->x_pixels_per_unit; |
retval |= PNG_INFO_pHYs; |
} |
if (res_y != NULL) |
{ |
*res_y = info_ptr->y_pixels_per_unit; |
retval |= PNG_INFO_pHYs; |
} |
if (unit_type != NULL) |
{ |
*unit_type = (int)info_ptr->phys_unit_type; |
retval |= PNG_INFO_pHYs; |
if(*unit_type == PNG_RESOLUTION_METER) |
{ |
if (res_x != NULL) *res_x = (png_uint_32)(*res_x * .0254 + .50); |
if (res_y != NULL) *res_y = (png_uint_32)(*res_y * .0254 + .50); |
} |
} |
} |
return (retval); |
} |
#endif /* PNG_pHYs_SUPPORTED */ |
#endif /* PNG_INCH_CONVERSIONS && PNG_FLOATING_POINT_SUPPORTED */ |
/* png_get_channels really belongs in here, too, but it's been around longer */ |
#endif /* PNG_EASY_ACCESS_SUPPORTED */ |
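/* Illustrative sketch (not part of libpng): reading the physical |
 * resolution through the easy-access functions above.  png_ptr and |
 * info_ptr stand for handles already filled in by png_read_info(). |
 * As the inch wrappers show, dots-per-inch is simply |
 * pixels-per-meter * 0.0254, rounded to the nearest integer. |
 */ |
#if 0 |
   png_uint_32 ppm = png_get_pixels_per_meter(png_ptr, info_ptr); |
   png_uint_32 dpi = (png_uint_32)((float)ppm * .0254 + .5); |
   /* ppm (and therefore dpi) is 0 if there is no pHYs chunk, or if the |
      resolution is not expressed in pixels per meter */ |
#endif /* 0 */ |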
png_byte PNGAPI |
png_get_channels(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
return(info_ptr->channels); |
else |
return (0); |
} |
png_bytep PNGAPI |
png_get_signature(png_structp png_ptr, png_infop info_ptr) |
{ |
if (png_ptr != NULL && info_ptr != NULL) |
return(info_ptr->signature); |
else |
return (NULL); |
} |
#if defined(PNG_bKGD_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_bKGD(png_structp png_ptr, png_infop info_ptr, |
png_color_16p *background) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_bKGD) |
&& background != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "bKGD"); |
*background = &(info_ptr->background); |
return (PNG_INFO_bKGD); |
} |
return (0); |
} |
#endif |
#if defined(PNG_cHRM_SUPPORTED) |
#ifdef PNG_FLOATING_POINT_SUPPORTED |
png_uint_32 PNGAPI |
png_get_cHRM(png_structp png_ptr, png_infop info_ptr, |
double *white_x, double *white_y, double *red_x, double *red_y, |
double *green_x, double *green_y, double *blue_x, double *blue_y) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_cHRM)) |
{ |
png_debug1(1, "in %s retrieval function\n", "cHRM"); |
if (white_x != NULL) |
*white_x = (double)info_ptr->x_white; |
if (white_y != NULL) |
*white_y = (double)info_ptr->y_white; |
if (red_x != NULL) |
*red_x = (double)info_ptr->x_red; |
if (red_y != NULL) |
*red_y = (double)info_ptr->y_red; |
if (green_x != NULL) |
*green_x = (double)info_ptr->x_green; |
if (green_y != NULL) |
*green_y = (double)info_ptr->y_green; |
if (blue_x != NULL) |
*blue_x = (double)info_ptr->x_blue; |
if (blue_y != NULL) |
*blue_y = (double)info_ptr->y_blue; |
return (PNG_INFO_cHRM); |
} |
return (0); |
} |
#endif |
#ifdef PNG_FIXED_POINT_SUPPORTED |
png_uint_32 PNGAPI |
png_get_cHRM_fixed(png_structp png_ptr, png_infop info_ptr, |
png_fixed_point *white_x, png_fixed_point *white_y, png_fixed_point *red_x, |
png_fixed_point *red_y, png_fixed_point *green_x, png_fixed_point *green_y, |
png_fixed_point *blue_x, png_fixed_point *blue_y) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_cHRM)) |
{ |
png_debug1(1, "in %s retrieval function\n", "cHRM"); |
if (white_x != NULL) |
*white_x = info_ptr->int_x_white; |
if (white_y != NULL) |
*white_y = info_ptr->int_y_white; |
if (red_x != NULL) |
*red_x = info_ptr->int_x_red; |
if (red_y != NULL) |
*red_y = info_ptr->int_y_red; |
if (green_x != NULL) |
*green_x = info_ptr->int_x_green; |
if (green_y != NULL) |
*green_y = info_ptr->int_y_green; |
if (blue_x != NULL) |
*blue_x = info_ptr->int_x_blue; |
if (blue_y != NULL) |
*blue_y = info_ptr->int_y_blue; |
return (PNG_INFO_cHRM); |
} |
return (0); |
} |
#endif |
#endif |
#if defined(PNG_gAMA_SUPPORTED) |
#ifdef PNG_FLOATING_POINT_SUPPORTED |
png_uint_32 PNGAPI |
png_get_gAMA(png_structp png_ptr, png_infop info_ptr, double *file_gamma) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_gAMA) |
&& file_gamma != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "gAMA"); |
*file_gamma = (double)info_ptr->gamma; |
return (PNG_INFO_gAMA); |
} |
return (0); |
} |
#endif |
#ifdef PNG_FIXED_POINT_SUPPORTED |
png_uint_32 PNGAPI |
png_get_gAMA_fixed(png_structp png_ptr, png_infop info_ptr, |
png_fixed_point *int_file_gamma) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_gAMA) |
&& int_file_gamma != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "gAMA"); |
*int_file_gamma = info_ptr->int_gamma; |
return (PNG_INFO_gAMA); |
} |
return (0); |
} |
#endif |
#endif |
#if defined(PNG_sRGB_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_sRGB(png_structp png_ptr, png_infop info_ptr, int *file_srgb_intent) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_sRGB) |
&& file_srgb_intent != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "sRGB"); |
*file_srgb_intent = (int)info_ptr->srgb_intent; |
return (PNG_INFO_sRGB); |
} |
return (0); |
} |
#endif |
#if defined(PNG_iCCP_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_iCCP(png_structp png_ptr, png_infop info_ptr, |
png_charpp name, int *compression_type, |
png_charpp profile, png_uint_32 *proflen) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_iCCP) |
&& name != NULL && profile != NULL && proflen != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "iCCP"); |
*name = info_ptr->iccp_name; |
*profile = info_ptr->iccp_profile; |
*proflen = (png_uint_32)info_ptr->iccp_proflen; |
/* compression_type is a dummy so the API won't have to change |
if we introduce multiple compression types later. */ |
*compression_type = (int)info_ptr->iccp_compression; |
return (PNG_INFO_iCCP); |
} |
return (0); |
} |
#endif |
#if defined(PNG_sPLT_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_sPLT(png_structp png_ptr, png_infop info_ptr, |
png_sPLT_tpp spalettes) |
{ |
if (png_ptr != NULL && info_ptr != NULL && spalettes != NULL) |
*spalettes = info_ptr->splt_palettes; |
return ((png_uint_32)info_ptr->splt_palettes_num); |
} |
#endif |
#if defined(PNG_hIST_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_hIST(png_structp png_ptr, png_infop info_ptr, png_uint_16p *hist) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_hIST) |
&& hist != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "hIST"); |
*hist = info_ptr->hist; |
return (PNG_INFO_hIST); |
} |
return (0); |
} |
#endif |
png_uint_32 PNGAPI |
png_get_IHDR(png_structp png_ptr, png_infop info_ptr, |
png_uint_32 *width, png_uint_32 *height, int *bit_depth, |
int *color_type, int *interlace_type, int *compression_type, |
int *filter_type) |
{ |
if (png_ptr != NULL && info_ptr != NULL && width != NULL && height != NULL && |
bit_depth != NULL && color_type != NULL) |
{ |
int pixel_depth, channels; |
png_uint_32 rowbytes_per_pixel; |
png_debug1(1, "in %s retrieval function\n", "IHDR"); |
*width = info_ptr->width; |
*height = info_ptr->height; |
*bit_depth = info_ptr->bit_depth; |
if (info_ptr->bit_depth < 1 || info_ptr->bit_depth > 16) |
png_error(png_ptr, "Invalid bit depth"); |
*color_type = info_ptr->color_type; |
if (info_ptr->color_type > 6) |
png_error(png_ptr, "Invalid color type"); |
if (compression_type != NULL) |
*compression_type = info_ptr->compression_type; |
if (filter_type != NULL) |
*filter_type = info_ptr->filter_type; |
if (interlace_type != NULL) |
*interlace_type = info_ptr->interlace_type; |
/* check for potential overflow of rowbytes */ |
if (*color_type == PNG_COLOR_TYPE_PALETTE) |
channels = 1; |
else if (*color_type & PNG_COLOR_MASK_COLOR) |
channels = 3; |
else |
channels = 1; |
if (*color_type & PNG_COLOR_MASK_ALPHA) |
channels++; |
pixel_depth = *bit_depth * channels; |
rowbytes_per_pixel = (pixel_depth + 7) >> 3; |
if (*width == 0 || *width > PNG_MAX_UINT) |
png_error(png_ptr, "Invalid image width"); |
if (*height == 0 || *height > PNG_MAX_UINT) |
png_error(png_ptr, "Invalid image height"); |
if (*width > PNG_MAX_UINT/rowbytes_per_pixel - 64) |
{ |
png_error(png_ptr, |
"Width too large for libpng to process image data."); |
} |
return (1); |
} |
return (0); |
} |
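/* Illustrative sketch (not part of libpng): a typical png_get_IHDR() call |
 * after png_read_info().  png_ptr and info_ptr are placeholders for |
 * handles created by the application.  The width, height, bit_depth and |
 * color_type pointers must not be NULL; the remaining three may be. |
 */ |
#if 0 |
   png_uint_32 width, height; |
   int bit_depth, color_type, interlace_type; |
   if (png_get_IHDR(png_ptr, info_ptr, &width, &height, &bit_depth, |
       &color_type, &interlace_type, NULL, NULL)) |
   { |
      /* compression_type and filter_type were not needed, so NULL |
         was passed for them */ |
   } |
#endif /* 0 */ |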
#if defined(PNG_oFFs_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_oFFs(png_structp png_ptr, png_infop info_ptr, |
png_int_32 *offset_x, png_int_32 *offset_y, int *unit_type) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_oFFs) |
&& offset_x != NULL && offset_y != NULL && unit_type != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "oFFs"); |
*offset_x = info_ptr->x_offset; |
*offset_y = info_ptr->y_offset; |
*unit_type = (int)info_ptr->offset_unit_type; |
return (PNG_INFO_oFFs); |
} |
return (0); |
} |
#endif |
#if defined(PNG_pCAL_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_pCAL(png_structp png_ptr, png_infop info_ptr, |
png_charp *purpose, png_int_32 *X0, png_int_32 *X1, int *type, int *nparams, |
png_charp *units, png_charpp *params) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_pCAL) |
&& purpose != NULL && X0 != NULL && X1 != NULL && type != NULL && |
nparams != NULL && units != NULL && params != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "pCAL"); |
*purpose = info_ptr->pcal_purpose; |
*X0 = info_ptr->pcal_X0; |
*X1 = info_ptr->pcal_X1; |
*type = (int)info_ptr->pcal_type; |
*nparams = (int)info_ptr->pcal_nparams; |
*units = info_ptr->pcal_units; |
*params = info_ptr->pcal_params; |
return (PNG_INFO_pCAL); |
} |
return (0); |
} |
#endif |
#if defined(PNG_sCAL_SUPPORTED) |
#ifdef PNG_FLOATING_POINT_SUPPORTED |
png_uint_32 PNGAPI |
png_get_sCAL(png_structp png_ptr, png_infop info_ptr, |
int *unit, double *width, double *height) |
{ |
if (png_ptr != NULL && info_ptr != NULL && |
(info_ptr->valid & PNG_INFO_sCAL)) |
{ |
*unit = info_ptr->scal_unit; |
*width = info_ptr->scal_pixel_width; |
*height = info_ptr->scal_pixel_height; |
return (PNG_INFO_sCAL); |
} |
return(0); |
} |
#else |
#ifdef PNG_FIXED_POINT_SUPPORTED |
png_uint_32 PNGAPI |
png_get_sCAL_s(png_structp png_ptr, png_infop info_ptr, |
int *unit, png_charpp width, png_charpp height) |
{ |
if (png_ptr != NULL && info_ptr != NULL && |
(info_ptr->valid & PNG_INFO_sCAL)) |
{ |
*unit = info_ptr->scal_unit; |
*width = info_ptr->scal_s_width; |
*height = info_ptr->scal_s_height; |
return (PNG_INFO_sCAL); |
} |
return(0); |
} |
#endif |
#endif |
#endif |
#if defined(PNG_pHYs_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_pHYs(png_structp png_ptr, png_infop info_ptr, |
png_uint_32 *res_x, png_uint_32 *res_y, int *unit_type) |
{ |
png_uint_32 retval = 0; |
if (png_ptr != NULL && info_ptr != NULL && |
(info_ptr->valid & PNG_INFO_pHYs)) |
{ |
png_debug1(1, "in %s retrieval function\n", "pHYs"); |
if (res_x != NULL) |
{ |
*res_x = info_ptr->x_pixels_per_unit; |
retval |= PNG_INFO_pHYs; |
} |
if (res_y != NULL) |
{ |
*res_y = info_ptr->y_pixels_per_unit; |
retval |= PNG_INFO_pHYs; |
} |
if (unit_type != NULL) |
{ |
*unit_type = (int)info_ptr->phys_unit_type; |
retval |= PNG_INFO_pHYs; |
} |
} |
return (retval); |
} |
#endif |
png_uint_32 PNGAPI |
png_get_PLTE(png_structp png_ptr, png_infop info_ptr, png_colorp *palette, |
int *num_palette) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_PLTE) |
&& palette != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "PLTE"); |
*palette = info_ptr->palette; |
*num_palette = info_ptr->num_palette; |
png_debug1(3, "num_palette = %d\n", *num_palette); |
return (PNG_INFO_PLTE); |
} |
return (0); |
} |
#if defined(PNG_sBIT_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_sBIT(png_structp png_ptr, png_infop info_ptr, png_color_8p *sig_bit) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_sBIT) |
&& sig_bit != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "sBIT"); |
*sig_bit = &(info_ptr->sig_bit); |
return (PNG_INFO_sBIT); |
} |
return (0); |
} |
#endif |
#if defined(PNG_TEXT_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_text(png_structp png_ptr, png_infop info_ptr, png_textp *text_ptr, |
int *num_text) |
{ |
if (png_ptr != NULL && info_ptr != NULL && info_ptr->num_text > 0) |
{ |
png_debug1(1, "in %s retrieval function\n", |
(png_ptr->chunk_name[0] == '\0' ? "text" |
: (png_const_charp)png_ptr->chunk_name)); |
if (text_ptr != NULL) |
*text_ptr = info_ptr->text; |
if (num_text != NULL) |
*num_text = info_ptr->num_text; |
return ((png_uint_32)info_ptr->num_text); |
} |
if (num_text != NULL) |
*num_text = 0; |
return(0); |
} |
#endif |
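/* Illustrative sketch (not part of libpng): walking the text chunks |
 * returned by png_get_text().  Placeholder handles as above; assumes |
 * <stdio.h> is available for printf. |
 */ |
#if 0 |
   png_textp text_ptr; |
   int num_text, i; |
   if (png_get_text(png_ptr, info_ptr, &text_ptr, &num_text) > 0) |
      for (i = 0; i < num_text; i++) |
         printf("%s: %s\n", text_ptr[i].key, |
            text_ptr[i].text != NULL ? text_ptr[i].text : ""); |
#endif /* 0 */ |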
#if defined(PNG_tIME_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_tIME(png_structp png_ptr, png_infop info_ptr, png_timep *mod_time) |
{ |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_tIME) |
&& mod_time != NULL) |
{ |
png_debug1(1, "in %s retrieval function\n", "tIME"); |
*mod_time = &(info_ptr->mod_time); |
return (PNG_INFO_tIME); |
} |
return (0); |
} |
#endif |
#if defined(PNG_tRNS_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_tRNS(png_structp png_ptr, png_infop info_ptr, |
png_bytep *trans, int *num_trans, png_color_16p *trans_values) |
{ |
png_uint_32 retval = 0; |
if (png_ptr != NULL && info_ptr != NULL && (info_ptr->valid & PNG_INFO_tRNS)) |
{ |
png_debug1(1, "in %s retrieval function\n", "tRNS"); |
if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE) |
{ |
if (trans != NULL) |
{ |
*trans = info_ptr->trans; |
retval |= PNG_INFO_tRNS; |
} |
if (trans_values != NULL) |
*trans_values = &(info_ptr->trans_values); |
} |
else /* if (info_ptr->color_type != PNG_COLOR_TYPE_PALETTE) */ |
{ |
if (trans_values != NULL) |
{ |
*trans_values = &(info_ptr->trans_values); |
retval |= PNG_INFO_tRNS; |
} |
if(trans != NULL) |
*trans = NULL; |
} |
if(num_trans != NULL) |
{ |
*num_trans = info_ptr->num_trans; |
retval |= PNG_INFO_tRNS; |
} |
} |
return (retval); |
} |
#endif |
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) |
png_uint_32 PNGAPI |
png_get_unknown_chunks(png_structp png_ptr, png_infop info_ptr, |
png_unknown_chunkpp unknowns) |
{ |
if (png_ptr != NULL && info_ptr != NULL && unknowns != NULL) |
*unknowns = info_ptr->unknown_chunks; |
return ((png_uint_32)info_ptr->unknown_chunks_num); |
} |
#endif |
#if defined(PNG_READ_RGB_TO_GRAY_SUPPORTED) |
png_byte PNGAPI |
png_get_rgb_to_gray_status (png_structp png_ptr) |
{ |
return (png_byte)(png_ptr? png_ptr->rgb_to_gray_status : 0); |
} |
#endif |
#if defined(PNG_USER_CHUNKS_SUPPORTED) |
png_voidp PNGAPI |
png_get_user_chunk_ptr(png_structp png_ptr) |
{ |
return (png_ptr? png_ptr->user_chunk_ptr : NULL); |
} |
#endif |
png_uint_32 PNGAPI |
png_get_compression_buffer_size(png_structp png_ptr) |
{ |
return (png_uint_32)(png_ptr? png_ptr->zbuf_size : 0L); |
} |
#ifndef PNG_1_0_X |
#ifdef PNG_ASSEMBLER_CODE_SUPPORTED |
/* this function was added to libpng 1.2.0 and should exist by default */ |
png_uint_32 PNGAPI |
png_get_asm_flags (png_structp png_ptr) |
{ |
return (png_uint_32)(png_ptr? png_ptr->asm_flags : 0L); |
} |
/* this function was added to libpng 1.2.0 and should exist by default */ |
png_uint_32 PNGAPI |
png_get_asm_flagmask (int flag_select) |
{ |
png_uint_32 settable_asm_flags = 0; |
if (flag_select & PNG_SELECT_READ) |
settable_asm_flags |= |
PNG_ASM_FLAG_MMX_READ_COMBINE_ROW | |
PNG_ASM_FLAG_MMX_READ_INTERLACE | |
PNG_ASM_FLAG_MMX_READ_FILTER_SUB | |
PNG_ASM_FLAG_MMX_READ_FILTER_UP | |
PNG_ASM_FLAG_MMX_READ_FILTER_AVG | |
PNG_ASM_FLAG_MMX_READ_FILTER_PAETH ; |
/* no non-MMX flags yet */ |
#if 0 |
/* GRR: no write-flags yet, either, but someday... */ |
if (flag_select & PNG_SELECT_WRITE) |
settable_asm_flags |= |
PNG_ASM_FLAG_MMX_WRITE_ [whatever] ; |
#endif /* 0 */ |
return settable_asm_flags; /* _theoretically_ settable capabilities only */ |
} |
#endif /* PNG_ASSEMBLER_CODE_SUPPORTED */ |
#if defined(PNG_ASSEMBLER_CODE_SUPPORTED) |
/* GRR: could add this: && defined(PNG_MMX_CODE_SUPPORTED) */ |
/* this function was added to libpng 1.2.0 */ |
png_uint_32 PNGAPI |
png_get_mmx_flagmask (int flag_select, int *compilerID) |
{ |
png_uint_32 settable_mmx_flags = 0; |
if (flag_select & PNG_SELECT_READ) |
settable_mmx_flags |= |
PNG_ASM_FLAG_MMX_READ_COMBINE_ROW | |
PNG_ASM_FLAG_MMX_READ_INTERLACE | |
PNG_ASM_FLAG_MMX_READ_FILTER_SUB | |
PNG_ASM_FLAG_MMX_READ_FILTER_UP | |
PNG_ASM_FLAG_MMX_READ_FILTER_AVG | |
PNG_ASM_FLAG_MMX_READ_FILTER_PAETH ; |
#if 0 |
/* GRR: no MMX write support yet, but someday... */ |
if (flag_select & PNG_SELECT_WRITE) |
settable_mmx_flags |= |
PNG_ASM_FLAG_MMX_WRITE_ [whatever] ; |
#endif /* 0 */ |
if (compilerID != NULL) { |
#ifdef PNG_USE_PNGVCRD |
*compilerID = 1; /* MSVC */ |
#else |
#ifdef PNG_USE_PNGGCCRD |
*compilerID = 2; /* gcc/gas */ |
#else |
*compilerID = -1; /* unknown (i.e., no asm/MMX code compiled) */ |
#endif |
#endif |
} |
return settable_mmx_flags; /* _theoretically_ settable capabilities only */ |
} |
/* this function was added to libpng 1.2.0 */ |
png_byte PNGAPI |
png_get_mmx_bitdepth_threshold (png_structp png_ptr) |
{ |
return (png_byte)(png_ptr? png_ptr->mmx_bitdepth_threshold : 0); |
} |
/* this function was added to libpng 1.2.0 */ |
png_uint_32 PNGAPI |
png_get_mmx_rowbytes_threshold (png_structp png_ptr) |
{ |
return (png_uint_32)(png_ptr? png_ptr->mmx_rowbytes_threshold : 0L); |
} |
#endif /* PNG_ASSEMBLER_CODE_SUPPORTED */ |
#endif /* PNG_1_0_X */ |
/shark/trunk/ports/png/png.c |
---|
0,0 → 1,805 |
/* png.c - location for general purpose libpng functions |
* |
* libpng version 1.2.5 - October 3, 2002 |
* Copyright (c) 1998-2002 Glenn Randers-Pehrson |
* (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) |
* (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) |
* |
*/ |
#define PNG_INTERNAL |
#define PNG_NO_EXTERN |
#include "png.h" |
/* Generate a compiler error if there is an old png.h in the search path. */ |
typedef version_1_2_5 Your_png_h_is_not_version_1_2_5; |
/* Version information for C files. This had better match the version |
* string defined in png.h. */ |
#ifdef PNG_USE_GLOBAL_ARRAYS |
/* png_libpng_ver was changed to a function in version 1.0.5c */ |
const char png_libpng_ver[18] = "1.2.5"; |
/* png_sig was changed to a function in version 1.0.5c */ |
/* Place to hold the signature string for a PNG file. */ |
const png_byte FARDATA png_sig[8] = {137, 80, 78, 71, 13, 10, 26, 10}; |
/* Invoke global declarations for constant strings for known chunk types */ |
PNG_IHDR; |
PNG_IDAT; |
PNG_IEND; |
PNG_PLTE; |
PNG_bKGD; |
PNG_cHRM; |
PNG_gAMA; |
PNG_hIST; |
PNG_iCCP; |
PNG_iTXt; |
PNG_oFFs; |
PNG_pCAL; |
PNG_sCAL; |
PNG_pHYs; |
PNG_sBIT; |
PNG_sPLT; |
PNG_sRGB; |
PNG_tEXt; |
PNG_tIME; |
PNG_tRNS; |
PNG_zTXt; |
/* arrays to facilitate easy interlacing - use pass (0 - 6) as index */ |
/* start of interlace block */ |
const int FARDATA png_pass_start[] = {0, 4, 0, 2, 0, 1, 0}; |
/* offset to next interlace block */ |
const int FARDATA png_pass_inc[] = {8, 8, 4, 4, 2, 2, 1}; |
/* start of interlace block in the y direction */ |
const int FARDATA png_pass_ystart[] = {0, 0, 4, 0, 2, 0, 1}; |
/* offset to next interlace block in the y direction */ |
const int FARDATA png_pass_yinc[] = {8, 8, 8, 4, 4, 2, 2}; |
/* width of interlace block (used in assembler routines only) */ |
#ifdef PNG_HAVE_ASSEMBLER_COMBINE_ROW |
const int FARDATA png_pass_width[] = {8, 4, 4, 2, 2, 1, 1}; |
#endif |
/* Height of interlace block. This is not currently used - if you need |
* it, uncomment it here and in png.h |
const int FARDATA png_pass_height[] = {8, 8, 4, 4, 2, 2, 1}; |
*/ |
/* Mask to determine which pixels are valid in a pass */ |
const int FARDATA png_pass_mask[] = {0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff}; |
/* Mask to determine which pixels to overwrite while displaying */ |
const int FARDATA png_pass_dsp_mask[] |
= {0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff}; |
#endif |
/* Tells libpng that we have already handled the first "num_bytes" bytes |
* of the PNG file signature. If the PNG data is embedded into another |
* stream we can set num_bytes = 8 so that libpng will not attempt to read |
* or write any of the magic bytes before it starts on the IHDR. |
*/ |
void PNGAPI |
png_set_sig_bytes(png_structp png_ptr, int num_bytes) |
{ |
png_debug(1, "in png_set_sig_bytes\n"); |
if (num_bytes > 8) |
png_error(png_ptr, "Too many bytes for PNG signature."); |
png_ptr->sig_bytes = (png_byte)(num_bytes < 0 ? 0 : num_bytes); |
} |
/* Checks whether the supplied bytes match the PNG signature. We allow |
* checking less than the full 8-byte signature so that those apps that |
* already read the first few bytes of a file to determine the file type |
* can simply check the remaining bytes for extra assurance. Returns |
* an integer less than, equal to, or greater than zero if sig is found, |
* respectively, to be less than, to match, or be greater than the correct |
* PNG signature (this is the same behaviour as strcmp, memcmp, etc). |
*/ |
int PNGAPI |
png_sig_cmp(png_bytep sig, png_size_t start, png_size_t num_to_check) |
{ |
png_byte png_signature[8] = {137, 80, 78, 71, 13, 10, 26, 10}; |
if (num_to_check > 8) |
num_to_check = 8; |
else if (num_to_check < 1) |
return (0); |
if (start > 7) |
return (0); |
if (start + num_to_check > 8) |
num_to_check = 8 - start; |
return ((int)(png_memcmp(&sig[start], &png_signature[start], num_to_check))); |
} |
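/* Illustrative sketch (not part of libpng): an application that reads the |
 * first 8 bytes itself can verify them with png_sig_cmp() and then tell |
 * libpng to skip them with png_set_sig_bytes().  The helper name and the |
 * FILE-based I/O are hypothetical; assumes <stdio.h>. |
 */ |
#if 0 |
static int |
check_if_png(FILE *fp, png_structp png_ptr) |
{ |
   png_byte header[8]; |
   if (fread(header, 1, 8, fp) != 8 || png_sig_cmp(header, 0, 8)) |
      return 0;                     /* not a PNG file */ |
   png_set_sig_bytes(png_ptr, 8);   /* libpng will not re-read the magic */ |
   return 1; |
} |
#endif /* 0 */ |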
/* (Obsolete) function to check signature bytes. It does not allow one |
* to check a partial signature. This function might be removed in the |
* future - use png_sig_cmp(). Returns true (nonzero) if the file is a PNG. |
*/ |
int PNGAPI |
png_check_sig(png_bytep sig, int num) |
{ |
return ((int)!png_sig_cmp(sig, (png_size_t)0, (png_size_t)num)); |
} |
/* Function to allocate memory for zlib and clear it to 0. */ |
#ifdef PNG_1_0_X |
voidpf PNGAPI |
#else |
voidpf /* private */ |
#endif |
png_zalloc(voidpf png_ptr, uInt items, uInt size) |
{ |
png_uint_32 num_bytes = (png_uint_32)items * size; |
png_voidp ptr; |
png_structp p=png_ptr; |
png_uint_32 save_flags=p->flags; |
p->flags|=PNG_FLAG_MALLOC_NULL_MEM_OK; |
ptr = (png_voidp)png_malloc((png_structp)png_ptr, num_bytes); |
p->flags=save_flags; |
#ifndef PNG_NO_ZALLOC_ZERO |
if (ptr == NULL) |
return ((voidpf)ptr); |
if (num_bytes > (png_uint_32)0x8000L) |
{ |
png_memset(ptr, 0, (png_size_t)0x8000L); |
png_memset((png_bytep)ptr + (png_size_t)0x8000L, 0, |
(png_size_t)(num_bytes - (png_uint_32)0x8000L)); |
} |
else |
{ |
png_memset(ptr, 0, (png_size_t)num_bytes); |
} |
#endif |
return ((voidpf)ptr); |
} |
/* function to free memory for zlib */ |
#ifdef PNG_1_0_X |
void PNGAPI |
#else |
void /* private */ |
#endif |
png_zfree(voidpf png_ptr, voidpf ptr) |
{ |
png_free((png_structp)png_ptr, (png_voidp)ptr); |
} |
/* Reset the CRC variable to 32 bits of 1's. Care must be taken |
* in case CRC is > 32 bits to leave the top bits 0. |
*/ |
void /* PRIVATE */ |
png_reset_crc(png_structp png_ptr) |
{ |
png_ptr->crc = crc32(0, Z_NULL, 0); |
} |
/* Calculate the CRC over a section of data. We can only pass as |
* much data to this routine as the largest single buffer size. We |
* also check that this data will actually be used before going to the |
* trouble of calculating it. |
*/ |
void /* PRIVATE */ |
png_calculate_crc(png_structp png_ptr, png_bytep ptr, png_size_t length) |
{ |
int need_crc = 1; |
if (png_ptr->chunk_name[0] & 0x20) /* ancillary */ |
{ |
if ((png_ptr->flags & PNG_FLAG_CRC_ANCILLARY_MASK) == |
(PNG_FLAG_CRC_ANCILLARY_USE | PNG_FLAG_CRC_ANCILLARY_NOWARN)) |
need_crc = 0; |
} |
else /* critical */ |
{ |
if (png_ptr->flags & PNG_FLAG_CRC_CRITICAL_IGNORE) |
need_crc = 0; |
} |
if (need_crc) |
png_ptr->crc = crc32(png_ptr->crc, ptr, (uInt)length); |
} |
/* Allocate the memory for an info_struct for the application. We don't |
* really need the png_ptr, but it could potentially be useful in the |
* future. This should be used in favour of malloc(sizeof(png_info)) |
* and png_info_init() so that applications that want to use a shared |
* libpng don't have to be recompiled if png_info changes size. |
*/ |
png_infop PNGAPI |
png_create_info_struct(png_structp png_ptr) |
{ |
png_infop info_ptr; |
png_debug(1, "in png_create_info_struct\n"); |
if(png_ptr == NULL) return (NULL); |
#ifdef PNG_USER_MEM_SUPPORTED |
info_ptr = (png_infop)png_create_struct_2(PNG_STRUCT_INFO, |
png_ptr->malloc_fn, png_ptr->mem_ptr); |
#else |
info_ptr = (png_infop)png_create_struct(PNG_STRUCT_INFO); |
#endif |
if (info_ptr != NULL) |
png_info_init_3(&info_ptr, sizeof(png_info)); |
return (info_ptr); |
} |
/* This function frees the memory associated with a single info struct. |
* Normally, one would use either png_destroy_read_struct() or |
* png_destroy_write_struct() to free an info struct, but this may be |
* useful for some applications. |
*/ |
void PNGAPI |
png_destroy_info_struct(png_structp png_ptr, png_infopp info_ptr_ptr) |
{ |
png_infop info_ptr = NULL; |
png_debug(1, "in png_destroy_info_struct\n"); |
if (info_ptr_ptr != NULL) |
info_ptr = *info_ptr_ptr; |
if (info_ptr != NULL) |
{ |
png_info_destroy(png_ptr, info_ptr); |
#ifdef PNG_USER_MEM_SUPPORTED |
png_destroy_struct_2((png_voidp)info_ptr, png_ptr->free_fn, |
png_ptr->mem_ptr); |
#else |
png_destroy_struct((png_voidp)info_ptr); |
#endif |
*info_ptr_ptr = NULL; |
} |
} |
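/* Illustrative sketch (not part of libpng): pairing png_create_info_struct() |
 * with png_destroy_read_struct(), as the comments above recommend instead |
 * of malloc(sizeof(png_info)).  Error/warning callbacks are omitted (NULL). |
 */ |
#if 0 |
   png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, |
      NULL, NULL, NULL); |
   png_infop info_ptr = png_create_info_struct(png_ptr); |
   if (info_ptr == NULL) |
      png_destroy_read_struct(&png_ptr, (png_infopp)NULL, (png_infopp)NULL); |
   /* ... read the image ... */ |
   png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp)NULL); |
#endif /* 0 */ |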
/* Initialize the info structure. This is now an internal function (0.89) |
* and applications using it are urged to use png_create_info_struct() |
* instead. |
*/ |
#undef png_info_init |
void PNGAPI |
png_info_init(png_infop info_ptr) |
{ |
/* We only come here via pre-1.0.12-compiled applications */ |
png_info_init_3(&info_ptr, 0); |
} |
void PNGAPI |
png_info_init_3(png_infopp ptr_ptr, png_size_t png_info_struct_size) |
{ |
png_infop info_ptr = *ptr_ptr; |
png_debug(1, "in png_info_init_3\n"); |
if(sizeof(png_info) > png_info_struct_size) |
{ |
png_destroy_struct(info_ptr); |
info_ptr = (png_infop)png_create_struct(PNG_STRUCT_INFO); |
*ptr_ptr = info_ptr; |
} |
/* set everything to 0 */ |
png_memset(info_ptr, 0, sizeof (png_info)); |
} |
#ifdef PNG_FREE_ME_SUPPORTED |
void PNGAPI |
png_data_freer(png_structp png_ptr, png_infop info_ptr, |
int freer, png_uint_32 mask) |
{ |
png_debug(1, "in png_data_freer\n"); |
if (png_ptr == NULL || info_ptr == NULL) |
return; |
if(freer == PNG_DESTROY_WILL_FREE_DATA) |
info_ptr->free_me |= mask; |
else if(freer == PNG_USER_WILL_FREE_DATA) |
info_ptr->free_me &= ~mask; |
else |
png_warning(png_ptr, |
"Unknown freer parameter in png_data_freer."); |
} |
#endif |
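/* Illustrative sketch (not part of libpng): telling libpng that the |
 * application, rather than png_destroy_*_struct(), will free the text |
 * data it obtained from png_get_text().  Placeholder handles as above. |
 */ |
#if 0 |
   png_data_freer(png_ptr, info_ptr, PNG_USER_WILL_FREE_DATA, PNG_FREE_TEXT); |
   /* ... later, when the application is finished with the strings: */ |
   png_free_data(png_ptr, info_ptr, PNG_FREE_TEXT, -1); |
#endif /* 0 */ |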
void PNGAPI |
png_free_data(png_structp png_ptr, png_infop info_ptr, png_uint_32 mask, |
int num) |
{ |
png_debug(1, "in png_free_data\n"); |
if (png_ptr == NULL || info_ptr == NULL) |
return; |
#if defined(PNG_TEXT_SUPPORTED) |
/* free text item num or (if num == -1) all text items */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_TEXT) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_TEXT) |
#endif |
{ |
if (num != -1) |
{ |
if (info_ptr->text && info_ptr->text[num].key) |
{ |
png_free(png_ptr, info_ptr->text[num].key); |
info_ptr->text[num].key = NULL; |
} |
} |
else |
{ |
int i; |
for (i = 0; i < info_ptr->num_text; i++) |
png_free_data(png_ptr, info_ptr, PNG_FREE_TEXT, i); |
png_free(png_ptr, info_ptr->text); |
info_ptr->text = NULL; |
info_ptr->num_text=0; |
} |
} |
#endif |
#if defined(PNG_tRNS_SUPPORTED) |
/* free any tRNS entry */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_TRNS) & info_ptr->free_me) |
#else |
if ((mask & PNG_FREE_TRNS) && (png_ptr->flags & PNG_FLAG_FREE_TRNS)) |
#endif |
{ |
png_free(png_ptr, info_ptr->trans); |
info_ptr->valid &= ~PNG_INFO_tRNS; |
#ifndef PNG_FREE_ME_SUPPORTED |
png_ptr->flags &= ~PNG_FLAG_FREE_TRNS; |
#endif |
info_ptr->trans = NULL; |
} |
#endif |
#if defined(PNG_sCAL_SUPPORTED) |
/* free any sCAL entry */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_SCAL) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_SCAL) |
#endif |
{ |
#if defined(PNG_FIXED_POINT_SUPPORTED) && !defined(PNG_FLOATING_POINT_SUPPORTED) |
png_free(png_ptr, info_ptr->scal_s_width); |
png_free(png_ptr, info_ptr->scal_s_height); |
info_ptr->scal_s_width = NULL; |
info_ptr->scal_s_height = NULL; |
#endif |
info_ptr->valid &= ~PNG_INFO_sCAL; |
} |
#endif |
#if defined(PNG_pCAL_SUPPORTED) |
/* free any pCAL entry */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_PCAL) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_PCAL) |
#endif |
{ |
png_free(png_ptr, info_ptr->pcal_purpose); |
png_free(png_ptr, info_ptr->pcal_units); |
info_ptr->pcal_purpose = NULL; |
info_ptr->pcal_units = NULL; |
if (info_ptr->pcal_params != NULL) |
{ |
int i; |
for (i = 0; i < (int)info_ptr->pcal_nparams; i++) |
{ |
png_free(png_ptr, info_ptr->pcal_params[i]); |
info_ptr->pcal_params[i]=NULL; |
} |
png_free(png_ptr, info_ptr->pcal_params); |
info_ptr->pcal_params = NULL; |
} |
info_ptr->valid &= ~PNG_INFO_pCAL; |
} |
#endif |
#if defined(PNG_iCCP_SUPPORTED) |
/* free any iCCP entry */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_ICCP) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_ICCP) |
#endif |
{ |
png_free(png_ptr, info_ptr->iccp_name); |
png_free(png_ptr, info_ptr->iccp_profile); |
info_ptr->iccp_name = NULL; |
info_ptr->iccp_profile = NULL; |
info_ptr->valid &= ~PNG_INFO_iCCP; |
} |
#endif |
#if defined(PNG_sPLT_SUPPORTED) |
/* free a given sPLT entry, or (if num == -1) all sPLT entries */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_SPLT) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_SPLT) |
#endif |
{ |
if (num != -1) |
{ |
if(info_ptr->splt_palettes) |
{ |
png_free(png_ptr, info_ptr->splt_palettes[num].name); |
png_free(png_ptr, info_ptr->splt_palettes[num].entries); |
info_ptr->splt_palettes[num].name = NULL; |
info_ptr->splt_palettes[num].entries = NULL; |
} |
} |
else |
{ |
if(info_ptr->splt_palettes_num) |
{ |
int i; |
for (i = 0; i < (int)info_ptr->splt_palettes_num; i++) |
png_free_data(png_ptr, info_ptr, PNG_FREE_SPLT, i); |
png_free(png_ptr, info_ptr->splt_palettes); |
info_ptr->splt_palettes = NULL; |
info_ptr->splt_palettes_num = 0; |
} |
info_ptr->valid &= ~PNG_INFO_sPLT; |
} |
} |
#endif |
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_UNKN) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_UNKN) |
#endif |
{ |
if (num != -1) |
{ |
if(info_ptr->unknown_chunks) |
{ |
png_free(png_ptr, info_ptr->unknown_chunks[num].data); |
info_ptr->unknown_chunks[num].data = NULL; |
} |
} |
else |
{ |
int i; |
if(info_ptr->unknown_chunks_num) |
{ |
for (i = 0; i < (int)info_ptr->unknown_chunks_num; i++) |
png_free_data(png_ptr, info_ptr, PNG_FREE_UNKN, i); |
png_free(png_ptr, info_ptr->unknown_chunks); |
info_ptr->unknown_chunks = NULL; |
info_ptr->unknown_chunks_num = 0; |
} |
} |
} |
#endif |
#if defined(PNG_hIST_SUPPORTED) |
/* free any hIST entry */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_HIST) & info_ptr->free_me) |
#else |
if ((mask & PNG_FREE_HIST) && (png_ptr->flags & PNG_FLAG_FREE_HIST)) |
#endif |
{ |
png_free(png_ptr, info_ptr->hist); |
info_ptr->hist = NULL; |
info_ptr->valid &= ~PNG_INFO_hIST; |
#ifndef PNG_FREE_ME_SUPPORTED |
png_ptr->flags &= ~PNG_FLAG_FREE_HIST; |
#endif |
} |
#endif |
/* free any PLTE entry that was internally allocated */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_PLTE) & info_ptr->free_me) |
#else |
if ((mask & PNG_FREE_PLTE) && (png_ptr->flags & PNG_FLAG_FREE_PLTE)) |
#endif |
{ |
png_zfree(png_ptr, info_ptr->palette); |
info_ptr->palette = NULL; |
info_ptr->valid &= ~PNG_INFO_PLTE; |
#ifndef PNG_FREE_ME_SUPPORTED |
png_ptr->flags &= ~PNG_FLAG_FREE_PLTE; |
#endif |
info_ptr->num_palette = 0; |
} |
#if defined(PNG_INFO_IMAGE_SUPPORTED) |
/* free any image bits attached to the info structure */ |
#ifdef PNG_FREE_ME_SUPPORTED |
if ((mask & PNG_FREE_ROWS) & info_ptr->free_me) |
#else |
if (mask & PNG_FREE_ROWS) |
#endif |
{ |
if(info_ptr->row_pointers) |
{ |
int row; |
for (row = 0; row < (int)info_ptr->height; row++) |
{ |
png_free(png_ptr, info_ptr->row_pointers[row]); |
info_ptr->row_pointers[row]=NULL; |
} |
png_free(png_ptr, info_ptr->row_pointers); |
info_ptr->row_pointers=NULL; |
} |
info_ptr->valid &= ~PNG_INFO_IDAT; |
} |
#endif |
#ifdef PNG_FREE_ME_SUPPORTED |
if(num == -1) |
info_ptr->free_me &= ~mask; |
else |
info_ptr->free_me &= ~(mask & ~PNG_FREE_MUL); |
#endif |
} |
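/* Illustrative sketch (not part of libpng): the num parameter of |
 * png_free_data() selects one entry of a multi-entry item (text, sPLT, |
 * unknown chunks) or, when -1, all entries matched by the mask. |
 */ |
#if 0 |
   png_free_data(png_ptr, info_ptr, PNG_FREE_UNKN, 0);  /* first unknown chunk only */ |
   png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);  /* everything libpng owns */ |
#endif /* 0 */ |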
/* This is an internal routine to free any memory that the info struct is |
* pointing to before re-using it or freeing the struct itself. Recall |
* that png_free() checks for NULL pointers for us. |
*/ |
void /* PRIVATE */ |
png_info_destroy(png_structp png_ptr, png_infop info_ptr) |
{ |
png_debug(1, "in png_info_destroy\n"); |
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1); |
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) |
if (png_ptr->num_chunk_list) |
{ |
png_free(png_ptr, png_ptr->chunk_list); |
png_ptr->chunk_list=NULL; |
png_ptr->num_chunk_list=0; |
} |
#endif |
png_info_init_3(&info_ptr, sizeof(png_info)); |
} |
/* This function returns a pointer to the io_ptr associated with the user |
* functions. The application should free any memory associated with this |
* pointer before png_write_destroy() or png_read_destroy() are called. |
*/ |
png_voidp PNGAPI |
png_get_io_ptr(png_structp png_ptr) |
{ |
return (png_ptr->io_ptr); |
} |
#if !defined(PNG_NO_STDIO) |
/* Initialize the default input/output functions for the PNG file. If you |
* use your own read or write routines, you can call either png_set_read_fn() |
* or png_set_write_fn() instead of png_init_io(). If you have defined |
* PNG_NO_STDIO, you must use a function of your own because "FILE *" isn't |
* necessarily available. |
*/ |
void PNGAPI |
png_init_io(png_structp png_ptr, png_FILE_p fp) |
{ |
png_debug(1, "in png_init_io\n"); |
png_ptr->io_ptr = (png_voidp)fp; |
} |
#endif |
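/* Illustrative sketch (not part of libpng): when PNG_NO_STDIO is defined, |
 * or stdio is simply not wanted, a read callback installed with |
 * png_set_read_fn() replaces png_init_io().  The my_stream type and the |
 * callback name are hypothetical. |
 */ |
#if 0 |
typedef struct { png_bytep buf; png_size_t pos, size; } my_stream; |
static void |
my_read_data(png_structp png_ptr, png_bytep data, png_size_t length) |
{ |
   my_stream *s = (my_stream *)png_get_io_ptr(png_ptr); |
   if (s->pos + length > s->size) |
      png_error(png_ptr, "Read Error"); |
   png_memcpy(data, s->buf + s->pos, length); |
   s->pos += length; |
} |
/* in the application's setup code: |
 *    png_set_read_fn(png_ptr, (png_voidp)&stream, my_read_data); |
 */ |
#endif /* 0 */ |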
#if defined(PNG_TIME_RFC1123_SUPPORTED) |
/* Convert the supplied time into an RFC 1123 string suitable for use in |
* a "Creation Time" or other text-based time string. |
*/ |
png_charp PNGAPI |
png_convert_to_rfc1123(png_structp png_ptr, png_timep ptime) |
{ |
static PNG_CONST char short_months[12][4] = |
{"Jan", "Feb", "Mar", "Apr", "May", "Jun", |
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; |
if (png_ptr->time_buffer == NULL) |
{ |
png_ptr->time_buffer = (png_charp)png_malloc(png_ptr, (png_uint_32)(29* |
sizeof(char))); |
} |
#if defined(_WIN32_WCE) |
{ |
wchar_t time_buf[29]; |
wsprintf(time_buf, TEXT("%d %S %d %02d:%02d:%02d +0000"), |
ptime->day % 32, short_months[(ptime->month - 1) % 12], |
ptime->year, ptime->hour % 24, ptime->minute % 60, |
ptime->second % 61); |
WideCharToMultiByte(CP_ACP, 0, time_buf, -1, png_ptr->time_buffer, 29, |
NULL, NULL); |
} |
#else |
#ifdef USE_FAR_KEYWORD |
{ |
char near_time_buf[29]; |
sprintf(near_time_buf, "%d %s %d %02d:%02d:%02d +0000", |
ptime->day % 32, short_months[(ptime->month - 1) % 12], |
ptime->year, ptime->hour % 24, ptime->minute % 60, |
ptime->second % 61); |
png_memcpy(png_ptr->time_buffer, near_time_buf, |
29*sizeof(char)); |
} |
#else |
sprintf(png_ptr->time_buffer, "%d %s %d %02d:%02d:%02d +0000", |
ptime->day % 32, short_months[(ptime->month - 1) % 12], |
ptime->year, ptime->hour % 24, ptime->minute % 60, |
ptime->second % 61); |
#endif |
#endif /* _WIN32_WCE */ |
return ((png_charp)png_ptr->time_buffer); |
} |
#endif /* PNG_TIME_RFC1123_SUPPORTED */ |
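/* Illustrative sketch (not part of libpng): formatting the tIME chunk of |
 * an image just read, using png_get_tIME() from pngget.c above.  The |
 * returned string (e.g. "3 Oct 2002 18:30:00 +0000") lives in a buffer |
 * owned by png_ptr and must not be freed by the caller. |
 */ |
#if 0 |
   png_timep mod_time; |
   if (png_get_tIME(png_ptr, info_ptr, &mod_time)) |
   { |
      png_charp rfc1123 = png_convert_to_rfc1123(png_ptr, mod_time); |
      /* use rfc1123, e.g. as a "Creation Time" text string */ |
   } |
#endif /* 0 */ |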
#if 0 |
/* Signature string for a PNG file. */ |
png_bytep PNGAPI |
png_sig_bytes(void) |
{ |
return ((png_bytep)"\211\120\116\107\015\012\032\012"); |
} |
#endif |
png_charp PNGAPI |
png_get_copyright(png_structp png_ptr) |
{ |
if (png_ptr != NULL || png_ptr == NULL) /* silence compiler warning */ |
return ((png_charp) "\n libpng version 1.2.5 - October 3, 2002\n\ |
Copyright (c) 1998-2002 Glenn Randers-Pehrson\n\ |
Copyright (c) 1996-1997 Andreas Dilger\n\ |
Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.\n"); |
return ((png_charp) ""); |
} |
/* The following return the library version as a short string in the |
* format 1.0.0 through 99.99.99zz. To get the version of *.h files used |
* with your application, print out PNG_LIBPNG_VER_STRING, which is defined |
* in png.h. |
*/ |
png_charp PNGAPI |
png_get_libpng_ver(png_structp png_ptr) |
{ |
/* Version of *.c files used when building libpng */ |
if(png_ptr != NULL) /* silence compiler warning about unused png_ptr */ |
return((png_charp) "1.2.5"); |
return((png_charp) "1.2.5"); |
} |
png_charp PNGAPI |
png_get_header_ver(png_structp png_ptr) |
{ |
/* Version of *.h files used when building libpng */ |
if(png_ptr != NULL) /* silence compiler warning about unused png_ptr */ |
return((png_charp) PNG_LIBPNG_VER_STRING); |
return((png_charp) PNG_LIBPNG_VER_STRING); |
} |
png_charp PNGAPI |
png_get_header_version(png_structp png_ptr) |
{ |
/* Returns longer string containing both version and date */ |
if(png_ptr != NULL) /* silence compiler warning about unused png_ptr */ |
return((png_charp) PNG_HEADER_VERSION_STRING); |
return((png_charp) PNG_HEADER_VERSION_STRING); |
} |
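/* Illustrative sketch (not part of libpng): comparing the png.h the |
 * application was compiled against with the library it is running with, |
 * as the comment above suggests.  Assumes <stdio.h> and <string.h>. |
 */ |
#if 0 |
   if (strcmp(png_get_libpng_ver(NULL), PNG_LIBPNG_VER_STRING) != 0) |
      fprintf(stderr, "warning: compiled with png.h %s, running with %s\n", |
         PNG_LIBPNG_VER_STRING, png_get_libpng_ver(NULL)); |
#endif /* 0 */ |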
#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED |
int PNGAPI |
png_handle_as_unknown(png_structp png_ptr, png_bytep chunk_name) |
{ |
/* check chunk_name and return "keep" value if it's on the list, else 0 */ |
int i; |
png_bytep p; |
if((png_ptr == NULL && chunk_name == NULL) || png_ptr->num_chunk_list<=0) |
return 0; |
p=png_ptr->chunk_list+png_ptr->num_chunk_list*5-5; |
for (i = png_ptr->num_chunk_list; i; i--, p-=5) |
if (!png_memcmp(chunk_name, p, 4)) |
return ((int)*(p+4)); |
return 0; |
} |
#endif |
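/* Illustrative sketch (not part of libpng): the keep-list consulted above |
 * is built with png_set_keep_unknown_chunks(); each entry is a 4-byte |
 * chunk name plus one "keep" byte, which is what the 5-byte stride in |
 * png_handle_as_unknown() reflects.  "vpAg" is just an example name. |
 */ |
#if 0 |
   static const png_byte vpag[5] = {'v', 'p', 'A', 'g', '\0'}; |
   png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_ALWAYS, |
      (png_bytep)vpag, 1); |
#endif /* 0 */ |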
/* This function, added to libpng-1.0.6g, is untested. */ |
int PNGAPI |
png_reset_zstream(png_structp png_ptr) |
{ |
return (inflateReset(&png_ptr->zstream)); |
} |
/* This function was added to libpng-1.0.7 */ |
png_uint_32 PNGAPI |
png_access_version_number(void) |
{ |
/* Version of *.c files used when building libpng */ |
return((png_uint_32) 10205L); |
} |
#if !defined(PNG_1_0_X) |
#if defined(PNG_ASSEMBLER_CODE_SUPPORTED) |
/* GRR: could add this: && defined(PNG_MMX_CODE_SUPPORTED) */ |
/* this INTERNAL function was added to libpng 1.2.0 */ |
void /* PRIVATE */ |
png_init_mmx_flags (png_structp png_ptr) |
{ |
png_ptr->mmx_rowbytes_threshold = 0; |
png_ptr->mmx_bitdepth_threshold = 0; |
# if (defined(PNG_USE_PNGVCRD) || defined(PNG_USE_PNGGCCRD)) |
png_ptr->asm_flags |= PN |