@@ -3502,7 +3502,7 @@ static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num)
          z->size [c] = (stbi_uc     ) s;
          z->value[c] = (stbi__uint16) i;
          if (s <= STBI__ZFAST_BITS) {
-            int k = stbi__bit_reverse(next_code[s],s);
+            k = stbi__bit_reverse(next_code[s],s);
             while (k < (1 << STBI__ZFAST_BITS)) {
                z->fast[k] = fastv;
                k += (1 << s);
@@ -4111,8 +4111,7 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r
       }
       if (img_n != out_n) {
          // insert alpha = 255
-         stbi_uc *cur = a->out + stride*j;
-         int i;
+         cur = a->out + stride*j;
          if (img_n == 1) {
             for (i=x-1; i >= 0; --i) {
                cur[i*2+1] = 255;
@@ -4909,8 +4908,8 @@ static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int
 
    if ( !tga_indexed && !tga_is_RLE) {
       for (i=0; i < tga_height; ++i) {
-         int y = tga_inverted ? tga_height -i - 1 : i;
-         stbi_uc *tga_row = tga_data + y*tga_width*tga_comp;
+         int row = tga_inverted ? tga_height -i - 1 : i;
+         stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
          stbi__getn(s, tga_row, tga_width * tga_comp);
       }
    } else {
@@ -5351,7 +5350,6 @@ static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *c
 
          if (count >= 128) { // Repeated
             stbi_uc value[4];
-            int i;
 
             if (count==128)
                count = stbi__get16be(s);
@@ -5593,7 +5591,7 @@ static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
          bits |= (stbi__int32) stbi__get8(s) << valid_bits;
          valid_bits += 8;
       } else {
-         stbi__int32 code = bits & codemask;
+         code = bits & codemask;
          bits >>= codesize;
          valid_bits -= codesize;
          // @OPTIMIZE: is there some way we can accelerate the non-clear path?