Bladeren bron

In a state that will make us fill the entire hard drive

Vinicius Teshima 1 jaar geleden
bovenliggende
commit
a500fc8b22
9 gewijzigde bestanden met toevoegingen van 33388 en 31 verwijderingen
  1. 28 2
      src/app.h
  2. 71 1
      src/buffer.h
  3. 19 0
      src/config.h
  4. 7 1
      src/da.h
  5. 8 8
      src/file.h
  6. 43 0
      src/keybind.h
  7. 58 19
      src/main.c
  8. 33153 0
      src/main.s
  9. 1 0
      t.c

+ 28 - 2
src/app.h

@@ -17,6 +17,9 @@ struct app {
 	struct font font;
 	struct buffer buf;
 	bool running;
+	struct {
+		uint8_t tab_size;
+	} cfg;
 };
 
 struct app app_create(const char *win_title);
@@ -49,7 +52,10 @@ app_create(const char *win_title)
 		.win = window_create(win_title),
 		.rdr = NULL,
 		.running = true,
-		.buf = buffer_create()
+		.buf = buffer_create(),
+		.cfg = {
+			.tab_size = 4
+		}
 	};
 
 	app.rdr = SDL_CreateRenderer(app.win.ptr, -1, SDL_RENDERER_ACCELERATED);
@@ -103,6 +109,10 @@ app_get_text_color(struct app app) {
 	return (r << 24) | (g << 16) | (b << 8) | (a << 0);
 }
 
+
+#define BRANCHLESS_IF(cond, when_true, when_false) \
+	(((cond)) * (when_true) + (!(cond)) * (when_false))
+
 void
 app_render_text(struct app app, const char *text, size_t text_size,
 		struct vec2 pos, uint32_t color, double scale)
@@ -114,13 +124,27 @@ app_render_text(struct app app, const char *text, size_t text_size,
 	char c;
 	for ( ; i < text_size; ++i ) {
 		c = text[i];
-		if ( c == '\n' ) {
+		bool cond = (pen.x > app.win.w);
+		pen.x = BRANCHLESS_IF(cond, pos.x, pen.x);
+		pen.y += BRANCHLESS_IF(cond,
+				       (double) (app.font.ch_h * scale), 0);
+		switch ( c ) {
+		case '\n': {
 			if ( i == app.buf.cur ) {
 				app_render_cursor_in_pos(app, pen, 0xFF00FFFF);
 			}
 			pen.x = pos.x;
 			pen.y += (double) (app.font.ch_h * scale);
 			continue;
+		} break;
+		case '\t': {
+			if ( i == app.buf.cur ) {
+				app_render_cursor_in_pos(app, pen, 0xFF00FFFF);
+			}
+			pen.x += (double) ((app.font.ch_w * scale)
+					   * app.cfg.tab_size);
+			continue;
+		} break;
 		}
 		app_render_char(app, c, pen, scale);
 		if ( i == app.buf.cur ) {
@@ -131,6 +155,7 @@ app_render_text(struct app app, const char *text, size_t text_size,
 	if ( i == app.buf.cur ) {
 		app_render_cursor_in_pos(app, pen, 0xFF00FFFF);
 	}
+
 }
 
 void
@@ -168,6 +193,7 @@ app_render_cursor(struct app app, uint32_t color)
 	app_render_cursor_in_pos(app, pos, color);
 }
 
+
 #endif /* defined(APP_IMP) || defined(IMP) */
 
 #endif

+ 71 - 1
src/buffer.h

@@ -3,7 +3,7 @@
 
 struct buffer {
 	DA_DEF_STRUCT_ITEM(char, data);
-	uint64_t cur;
+	size_t cur;
 };
 
 struct buffer buffer_create(void);
@@ -17,9 +17,15 @@ struct buffer buffer_mv_cur_down(struct buffer buf);
 struct buffer buffer_insert_char(struct buffer buf, size_t index, char c);
 struct buffer buffer_remove_char_mv_cur(struct buffer buf, size_t index);
 struct buffer buffer_remove_char(struct buffer buf, size_t index);
+struct buffer buffer_remove_between(struct buffer buf,
+				    size_t start, size_t end);
+
+size_t buffer_index_bw_word(struct buffer buf);
 
 #if defined(BUFFER_IMP) || defined(IMP)
 
+#include "da.h"
+
 struct buffer
 buffer_create(void)
 {
@@ -128,6 +134,70 @@ buffer_remove_char(struct buffer buf, size_t index)
 	return buf;
 }
 
+struct buffer
+buffer_remove_between(struct buffer buf, size_t start, size_t end)
+{
+	if ( buf.data.size == 0 ) {
+		return buf;
+	}
+
+	if ( start == end ) {
+		return buffer_remove_char(buf, start);
+	}
+
+	if ( start > end ) {
+		return buf;
+	}
+
+	DA_DEF_STRUCT_ITEM(char, da);
+	DA_ASSIGN(da, buf.data);
+
+	/* There is no need to check start > buf.data.size, because */
+	/* we know that start < end */
+	if ( end > da.size ) {
+		return buf;
+	}
+
+	size_t nrm = end - start;
+	printf("%ld\n", nrm);
+	memmove(da.items+start, da.items+end, da.size - nrm);
+	da.size -= nrm;
+	buf.cur -= nrm;
+
+	return buf;
+}
+
+#include <ctype.h>
+
+size_t
+buffer_index_bw_word(struct buffer buf)
+{
+	if ( buf.data.size == 0 ) {
+		return 0;
+	}
+
+	bool found_letter = false;
+	size_t i = buf.cur;
+	for ( ; i > 0; --i) {
+		char c = buf.data.items[i];
+		if ( isalpha(c) ) {
+			found_letter = true;
+		} else if ( found_letter ) {
+			return i+1;
+		}
+	}
+	if ( i == 0 ) {
+		char c = buf.data.items[i];
+		if ( isalpha(c) ) {
+			found_letter = true;
+		} else if ( found_letter ) {
+			return i+1;
+		}
+	}
+
+	return i;
+}
+
 #endif /* defined(BUFFER_IMP) || defined(IMP) */
 
 #endif

+ 19 - 0
src/config.h

@@ -0,0 +1,19 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#include "keybind.h"
+
+static const struct keybinds keybinds[] = {
+	[SDLK_LEFT] = {
+		.binds = {
+			{KMOD_NONE, keybind_left, NULL}
+		}
+	},
+	[SDLK_RIGHT] = {
+		.binds = {
+			{KMOD_NONE, keybind_right, NULL}
+		}
+	}
+};
+
+#endif

+ 7 - 1
src/da.h

@@ -1,12 +1,13 @@
 #ifndef DA_H
 #define DA_H
 
+#include <stdlib.h>
+
 #define CONCAT(a, b) CONCAT_INNER(a, b)
 #define CONCAT_INNER(a, b) a ## b
 
 #define UNIQUE_NAME(base) CONCAT(base, __COUNTER__)
 
-#include <stdlib.h>
 
 #define DA_DEF_STRUCT(type, name)		\
 	struct name {				\
@@ -83,4 +84,9 @@
 
 #define DA_TAIL(da) (da).items[(da).size-1]
 
+#define DA_ASSIGN(dst, src)			\
+	(dst).items = (src).items;		\
+	(dst).size = (src).size;		\
+	(dst).cap = (src).cap;
+
 #endif

+ 8 - 8
src/file.h

@@ -18,20 +18,20 @@ enum file_err {
 	FILE_ERR_FILE_EMPTY,
 };
 
-struct ret_uint8_p_err {
-	uint8_t *f1;
-	uint64_t f2;
-	enum file_err f3;
+struct ret_void_p_err {
+	void *f1;		/* ptr */
+	size_t f2;		/* size */
+	enum file_err f3;	/* err */
 };
 
-struct ret_uint8_p_err file_read_all(const char *filepath);
+struct ret_void_p_err file_read_all(const char *filepath);
 
 #if defined(BMP_IMP) || defined(IMP)
 
 #include <unistd.h>
 #include <fcntl.h>
 
-struct ret_uint8_p_err
+struct ret_void_p_err
 file_read_all(const char *filepath)
 {
 	enum file_err err = FILE_ERR_OK;
@@ -77,7 +77,7 @@ file_read_all(const char *filepath)
 		goto err;
 	}
 
-	return (struct ret_uint8_p_err) {
+	return (struct ret_void_p_err) {
 		.f1 = buf,
 		.f2 = buf_size,
 		.f3 = err
@@ -91,7 +91,7 @@ err_close: ;
 	}
 
 err: ;
-	return (struct ret_uint8_p_err) {
+	return (struct ret_void_p_err) {
 		.f1 = NULL,
 		.f2 = 0,
 		.f3 = err

+ 43 - 0
src/keybind.h

@@ -0,0 +1,43 @@
+#ifndef KEYBIND_H
+#define KEYBIND_H
+
+#include "app.h"
+
+#define KEYBIND_FUNC(name)				\
+	struct app keybind_ ## name(struct app app, const void *args)
+
+
+typedef struct app (*keybind_func)(struct app, const void *);
+
+struct bind {
+	const SDL_Keymod mod;
+	keybind_func func;
+	const void *args;
+};
+
+#define BINDS_SIZE 5
+struct keybinds {
+	const struct bind binds[BINDS_SIZE];
+};
+
+KEYBIND_FUNC(left);
+KEYBIND_FUNC(right);
+
+#if defined(KEYBIND_IMP) || defined(IMP)
+
+KEYBIND_FUNC(left) {
+	(void) args;
+	return app;
+}
+
+KEYBIND_FUNC(right) {
+	(void) args;
+	return app;
+}
+
+
+#endif /* defined(KEYBIND_IMP) || defined(IMP) */
+
+#undef KEYBIND_FUNC
+
+#endif

+ 58 - 19
src/main.c

@@ -21,6 +21,9 @@
 #include "vec2.h"
 #include "da.h"
 #include "unwrap.h"
+#include "file.h"
+#include "config.h"
+
 
 struct ret_app_buffer {
 	struct app f1;
@@ -37,15 +40,27 @@ main(int32_t argc, char **argv)
 	SCE(SDL_Init(SDL_INIT_VIDEO));
 
 	const char *font_path = "./charmap-oldschool_white.png";
+	const char *file_path = "./src/main.c";
 
 	struct app app = app_create("ged");
 
 	app.font = font_create(app.rdr, font_path, 7, 18);
 	app.font.scale = 2.0;
 
+	free(app.buf.data.items);
+
+	void *ptr = NULL;
+	size_t file_size = 0;
+	enum file_err err;
+	RET_UNWRAP3(ptr, file_size, err,
+		    struct ret_void_p_err, file_read_all(file_path));
+	(void) err;
+	app.buf.data.items = ptr;
+	app.buf.data.size = file_size-1;
+	app.buf.data.cap = file_size;
+
 	uint64_t i = 0;
 	while ( app.running == true ) {
-		WINDOW_UP_SIZE(app.win);
 		struct buffer *c_buf = &app.buf;
 
 		RET_UNWRAP2(app, *c_buf, struct ret_app_buffer,
@@ -68,6 +83,7 @@ main(int32_t argc, char **argv)
 	exit(EXIT_SUCCESS);
 }
 
+
 struct ret_app_buffer
 handle_events(struct app app, struct buffer buf)
 {
@@ -78,24 +94,18 @@ handle_events(struct app app, struct buffer buf)
 			app.running = false;
 		} break;
 		case SDL_KEYDOWN: {
-			switch ( e.key.keysym.sym ) {
-			case SDLK_BACKSPACE: {
-				buf = buffer_remove_char_mv_cur(buf, buf.cur-1);
-			} break;
-			case SDLK_DELETE: {
-				buf = buffer_remove_char(buf, buf.cur);
-			} break;
-			case SDLK_RETURN: {
-				buf = buffer_insert_char(buf, buf.cur, '\n');
-			} break;
-			case SDLK_LEFT: {
-				buf = buffer_mv_cur_letf(buf);
-			} break;
-			case SDLK_RIGHT: {
-				buf = buffer_mv_cur_right(buf);
-			} break;
-			case SDLK_PLUS: {
-			} break;
+			SDL_KeyboardEvent key = e.key;
+			SDL_Keysym ks = key.keysym;
+			struct keybinds kb = keybinds[ks.sym];
+			for ( size_t i = 0; i < BINDS_SIZE; ++i ) {
+				struct bind b = kb.binds[i];
+				if ( b.func == NULL ) {
+					continue;
+				}
+				if ( b.mod != ks.mod ) {
+					continue;
+				}
+				app = b.func(app, b.args);
 			}
 		} break;
 		case SDL_TEXTINPUT: {
@@ -104,6 +114,9 @@ handle_events(struct app app, struct buffer buf)
 				buf = buffer_insert_char(buf, buf.cur, *t++);
 			}
 		} break;
+		case SDL_WINDOWEVENT: {
+			WINDOW_UP_SIZE(app.win);
+		} break;
 		}
 	}
 
@@ -113,3 +126,29 @@ handle_events(struct app app, struct buffer buf)
 	};
 }
 
+			/* switch ( ks.sym ) { */
+			/* case SDLK_BACKSPACE: { */
+			/* 	if ( ks.mod & KMOD_CTRL ) { */
+			/* 		size_t index = buffer_index_bw_word(buf); */
+			/* 		printf("%ld %ld\n", index, buf.cur); */
+			/* 		buf = buffer_remove_between( */
+			/* 			buf, index, buf.cur); */
+			/* 	} else { */
+			/* 		buf = buffer_remove_char_mv_cur(buf, buf.cur-1); */
+			/* 	} */
+			/* } break; */
+			/* case SDLK_DELETE: { */
+			/* 	buf = buffer_remove_char(buf, buf.cur); */
+			/* } break; */
+			/* case SDLK_RETURN: { */
+			/* 	buf = buffer_insert_char(buf, buf.cur, '\n'); */
+			/* } break; */
+			/* case SDLK_LEFT: { */
+			/* 	buf = buffer_mv_cur_letf(buf); */
+			/* } break; */
+			/* case SDLK_RIGHT: { */
+			/* 	buf = buffer_mv_cur_right(buf); */
+			/* } break; */
+			/* case SDLK_PLUS: { */
+			/* } break; */
+			/* } */

+ 33153 - 0
src/main.s

@@ -0,0 +1,33153 @@
+	.file	"main.c"
+	.text
+	.type	stbi__sse2_available, @function
+stbi__sse2_available:
+.LFB4879:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	$1, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4879:
+	.size	stbi__sse2_available, .-stbi__sse2_available
+	.type	stbi__start_mem, @function
+stbi__start_mem:
+.LFB4880:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movl	%edx, -20(%rbp)
+	movq	-8(%rbp), %rax
+	movq	$0, 16(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 48(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 184(%rax)
+	movq	-8(%rbp), %rax
+	movq	-16(%rbp), %rdx
+	movq	%rdx, 208(%rax)
+	movq	-8(%rbp), %rax
+	movq	208(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-16(%rbp), %rax
+	addq	%rax, %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 216(%rax)
+	movq	-8(%rbp), %rax
+	movq	216(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 200(%rax)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4880:
+	.size	stbi__start_mem, .-stbi__start_mem
+	.type	stbi__start_callbacks, @function
+stbi__start_callbacks:
+.LFB4881:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	-16(%rbp), %rsi
+	movq	(%rsi), %rax
+	movq	8(%rsi), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	movq	16(%rsi), %rax
+	movq	%rax, 32(%rcx)
+	movq	-8(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	%rdx, 40(%rax)
+	movq	-8(%rbp), %rax
+	movl	$128, 52(%rax)
+	movq	-8(%rbp), %rax
+	movl	$1, 48(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 184(%rax)
+	movq	-8(%rbp), %rax
+	leaq	56(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 208(%rax)
+	movq	-8(%rbp), %rax
+	movq	208(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__refill_buffer
+	movq	-8(%rbp), %rax
+	movq	200(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 216(%rax)
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4881:
+	.size	stbi__start_callbacks, .-stbi__start_callbacks
+	.type	stbi__stdio_read, @function
+stbi__stdio_read:
+.LFB4882:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movl	%edx, -20(%rbp)
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-8(%rbp), %rcx
+	movq	-16(%rbp), %rax
+	movl	$1, %esi
+	movq	%rax, %rdi
+	call	fread@PLT
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4882:
+	.size	stbi__stdio_read, .-stbi__stdio_read
+	.type	stbi__stdio_skip, @function
+stbi__stdio_skip:
+.LFB4883:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movl	-28(%rbp), %eax
+	movslq	%eax, %rcx
+	movq	-24(%rbp), %rax
+	movl	$1, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	fseek@PLT
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	fgetc@PLT
+	movl	%eax, -4(%rbp)
+	cmpl	$-1, -4(%rbp)
+	je	.L9
+	movq	-24(%rbp), %rdx
+	movl	-4(%rbp), %eax
+	movq	%rdx, %rsi
+	movl	%eax, %edi
+	call	ungetc@PLT
+.L9:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4883:
+	.size	stbi__stdio_skip, .-stbi__stdio_skip
+	.type	stbi__stdio_eof, @function
+stbi__stdio_eof:
+.LFB4884:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	feof@PLT
+	testl	%eax, %eax
+	jne	.L11
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	ferror@PLT
+	testl	%eax, %eax
+	je	.L12
+.L11:
+	movl	$1, %eax
+	jmp	.L14
+.L12:
+	movl	$0, %eax
+.L14:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4884:
+	.size	stbi__stdio_eof, .-stbi__stdio_eof
+	.section	.data.rel.local,"aw"
+	.align 16
+	.type	stbi__stdio_callbacks, @object
+	.size	stbi__stdio_callbacks, 24
+stbi__stdio_callbacks:
+	.quad	stbi__stdio_read
+	.quad	stbi__stdio_skip
+	.quad	stbi__stdio_eof
+	.text
+	.type	stbi__start_file, @function
+stbi__start_file:
+.LFB4885:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	-16(%rbp), %rdx
+	movq	-8(%rbp), %rax
+	leaq	stbi__stdio_callbacks(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4885:
+	.size	stbi__start_file, .-stbi__start_file
+	.type	stbi__rewind, @function
+stbi__rewind:
+.LFB4886:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	208(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movq	-8(%rbp), %rax
+	movq	216(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 200(%rax)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4886:
+	.size	stbi__rewind, .-stbi__rewind
+	.section	.tbss,"awT",@nobits
+	.align 8
+	.type	stbi__g_failure_reason, @object
+	.size	stbi__g_failure_reason, 8
+stbi__g_failure_reason:
+	.zero	8
+	.text
+	.globl	stbi_failure_reason
+	.type	stbi_failure_reason, @function
+stbi_failure_reason:
+.LFB4887:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%fs:stbi__g_failure_reason@tpoff, %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4887:
+	.size	stbi_failure_reason, .-stbi_failure_reason
+	.type	stbi__err, @function
+stbi__err:
+.LFB4888:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %fs:stbi__g_failure_reason@tpoff
+	movl	$0, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4888:
+	.size	stbi__err, .-stbi__err
+	.type	stbi__malloc, @function
+stbi__malloc:
+.LFB4889:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	malloc@PLT
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4889:
+	.size	stbi__malloc, .-stbi__malloc
+	.type	stbi__addsizes_valid, @function
+stbi__addsizes_valid:
+.LFB4890:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	cmpl	$0, -8(%rbp)
+	jns	.L24
+	movl	$0, %eax
+	jmp	.L25
+.L24:
+	movl	$2147483647, %eax
+	subl	-8(%rbp), %eax
+	cmpl	%eax, -4(%rbp)
+	setle	%al
+	movzbl	%al, %eax
+.L25:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4890:
+	.size	stbi__addsizes_valid, .-stbi__addsizes_valid
+	.type	stbi__mul2sizes_valid, @function
+stbi__mul2sizes_valid:
+.LFB4891:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	cmpl	$0, -4(%rbp)
+	js	.L27
+	cmpl	$0, -8(%rbp)
+	jns	.L28
+.L27:
+	movl	$0, %eax
+	jmp	.L29
+.L28:
+	cmpl	$0, -8(%rbp)
+	jne	.L30
+	movl	$1, %eax
+	jmp	.L29
+.L30:
+	movl	$2147483647, %eax
+	cltd
+	idivl	-8(%rbp)
+	cmpl	%eax, -4(%rbp)
+	setle	%al
+	movzbl	%al, %eax
+.L29:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4891:
+	.size	stbi__mul2sizes_valid, .-stbi__mul2sizes_valid
+	.type	stbi__mad2sizes_valid, @function
+stbi__mad2sizes_valid:
+.LFB4892:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	-8(%rbp), %edx
+	movl	-4(%rbp), %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2sizes_valid
+	testl	%eax, %eax
+	je	.L32
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	movl	-12(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__addsizes_valid
+	testl	%eax, %eax
+	je	.L32
+	movl	$1, %eax
+	jmp	.L34
+.L32:
+	movl	$0, %eax
+.L34:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4892:
+	.size	stbi__mad2sizes_valid, .-stbi__mad2sizes_valid
+	.type	stbi__mad3sizes_valid, @function
+stbi__mad3sizes_valid:
+.LFB4893:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	%ecx, -16(%rbp)
+	movl	-8(%rbp), %edx
+	movl	-4(%rbp), %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2sizes_valid
+	testl	%eax, %eax
+	je	.L36
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	movl	-12(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2sizes_valid
+	testl	%eax, %eax
+	je	.L36
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	imull	-12(%rbp), %eax
+	movl	-16(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__addsizes_valid
+	testl	%eax, %eax
+	je	.L36
+	movl	$1, %eax
+	jmp	.L38
+.L36:
+	movl	$0, %eax
+.L38:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4893:
+	.size	stbi__mad3sizes_valid, .-stbi__mad3sizes_valid
+	.type	stbi__mad4sizes_valid, @function
+stbi__mad4sizes_valid:
+.LFB4894:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$24, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	%ecx, -16(%rbp)
+	movl	%r8d, -20(%rbp)
+	movl	-8(%rbp), %edx
+	movl	-4(%rbp), %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2sizes_valid
+	testl	%eax, %eax
+	je	.L40
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	movl	-12(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2sizes_valid
+	testl	%eax, %eax
+	je	.L40
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	imull	-12(%rbp), %eax
+	movl	-16(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2sizes_valid
+	testl	%eax, %eax
+	je	.L40
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	imull	-12(%rbp), %eax
+	imull	-16(%rbp), %eax
+	movl	-20(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__addsizes_valid
+	testl	%eax, %eax
+	je	.L40
+	movl	$1, %eax
+	jmp	.L42
+.L40:
+	movl	$0, %eax
+.L42:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4894:
+	.size	stbi__mad4sizes_valid, .-stbi__mad4sizes_valid
+	.type	stbi__malloc_mad2, @function
+stbi__malloc_mad2:
+.LFB4895:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	-12(%rbp), %edx
+	movl	-8(%rbp), %ecx
+	movl	-4(%rbp), %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__mad2sizes_valid
+	testl	%eax, %eax
+	jne	.L44
+	movl	$0, %eax
+	jmp	.L45
+.L44:
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	movl	%eax, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+.L45:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4895:
+	.size	stbi__malloc_mad2, .-stbi__malloc_mad2
+	.type	stbi__malloc_mad3, @function
+stbi__malloc_mad3:
+.LFB4896:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	%ecx, -16(%rbp)
+	movl	-16(%rbp), %ecx
+	movl	-12(%rbp), %edx
+	movl	-8(%rbp), %esi
+	movl	-4(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L47
+	movl	$0, %eax
+	jmp	.L48
+.L47:
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	imull	-12(%rbp), %eax
+	movl	%eax, %edx
+	movl	-16(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+.L48:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4896:
+	.size	stbi__malloc_mad3, .-stbi__malloc_mad3
+	.type	stbi__malloc_mad4, @function
+stbi__malloc_mad4:
+.LFB4897:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	%ecx, -16(%rbp)
+	movl	%r8d, -20(%rbp)
+	movl	-20(%rbp), %edi
+	movl	-16(%rbp), %ecx
+	movl	-12(%rbp), %edx
+	movl	-8(%rbp), %esi
+	movl	-4(%rbp), %eax
+	movl	%edi, %r8d
+	movl	%eax, %edi
+	call	stbi__mad4sizes_valid
+	testl	%eax, %eax
+	jne	.L50
+	movl	$0, %eax
+	jmp	.L51
+.L50:
+	movl	-4(%rbp), %eax
+	imull	-8(%rbp), %eax
+	imull	-12(%rbp), %eax
+	imull	-16(%rbp), %eax
+	movl	%eax, %edx
+	movl	-20(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+.L51:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4897:
+	.size	stbi__malloc_mad4, .-stbi__malloc_mad4
+	.type	stbi__addints_valid, @function
+stbi__addints_valid:
+.LFB4898:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	-4(%rbp), %eax
+	xorl	-8(%rbp), %eax
+	testl	%eax, %eax
+	jns	.L53
+	movl	$1, %eax
+	jmp	.L54
+.L53:
+	cmpl	$0, -4(%rbp)
+	jns	.L55
+	cmpl	$0, -8(%rbp)
+	jns	.L55
+	movl	$-2147483648, %eax
+	subl	-8(%rbp), %eax
+	cmpl	%eax, -4(%rbp)
+	setge	%al
+	movzbl	%al, %eax
+	jmp	.L54
+.L55:
+	movl	$2147483647, %eax
+	subl	-8(%rbp), %eax
+	cmpl	%eax, -4(%rbp)
+	setle	%al
+	movzbl	%al, %eax
+.L54:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4898:
+	.size	stbi__addints_valid, .-stbi__addints_valid
+	.type	stbi__mul2shorts_valid, @function
+stbi__mul2shorts_valid:
+.LFB4899:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	cmpl	$0, -8(%rbp)
+	je	.L57
+	cmpl	$-1, -8(%rbp)
+	jne	.L58
+.L57:
+	movl	$1, %eax
+	jmp	.L59
+.L58:
+	movl	-4(%rbp), %eax
+	xorl	-8(%rbp), %eax
+	testl	%eax, %eax
+	js	.L60
+	movl	$32767, %eax
+	cltd
+	idivl	-8(%rbp)
+	cmpl	%eax, -4(%rbp)
+	setle	%al
+	movzbl	%al, %eax
+	jmp	.L59
+.L60:
+	cmpl	$0, -8(%rbp)
+	jns	.L61
+	movl	$-32768, %eax
+	cltd
+	idivl	-8(%rbp)
+	cmpl	%eax, -4(%rbp)
+	setle	%al
+	movzbl	%al, %eax
+	jmp	.L59
+.L61:
+	movl	$-32768, %eax
+	cltd
+	idivl	-8(%rbp)
+	cmpl	%eax, -4(%rbp)
+	setge	%al
+	movzbl	%al, %eax
+.L59:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4899:
+	.size	stbi__mul2shorts_valid, .-stbi__mul2shorts_valid
+	.globl	stbi_image_free
+	.type	stbi_image_free, @function
+stbi_image_free:
+.LFB4900:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4900:
+	.size	stbi_image_free, .-stbi_image_free
+	.local	stbi__vertically_flip_on_load_global
+	.comm	stbi__vertically_flip_on_load_global,4,4
+	.globl	stbi_set_flip_vertically_on_load
+	.type	stbi_set_flip_vertically_on_load, @function
+stbi_set_flip_vertically_on_load:
+.LFB4901:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movl	%eax, stbi__vertically_flip_on_load_global(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4901:
+	.size	stbi_set_flip_vertically_on_load, .-stbi_set_flip_vertically_on_load
+	.section	.tbss
+	.align 4
+	.type	stbi__vertically_flip_on_load_local, @object
+	.size	stbi__vertically_flip_on_load_local, 4
+stbi__vertically_flip_on_load_local:
+	.zero	4
+	.align 4
+	.type	stbi__vertically_flip_on_load_set, @object
+	.size	stbi__vertically_flip_on_load_set, 4
+stbi__vertically_flip_on_load_set:
+	.zero	4
+	.text
+	.globl	stbi_set_flip_vertically_on_load_thread
+	.type	stbi_set_flip_vertically_on_load_thread, @function
+stbi_set_flip_vertically_on_load_thread:
+.LFB4902:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movl	%eax, %fs:stbi__vertically_flip_on_load_local@tpoff
+	movl	$1, %fs:stbi__vertically_flip_on_load_set@tpoff
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4902:
+	.size	stbi_set_flip_vertically_on_load_thread, .-stbi_set_flip_vertically_on_load_thread
+	.type	stbi__load_main, @function
+stbi__load_main:
+.LFB4903:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	%r9, -64(%rbp)
+	movq	-64(%rbp), %rax
+	movl	$12, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-64(%rbp), %rax
+	movl	$8, (%rax)
+	movq	-64(%rbp), %rax
+	movl	$0, 8(%rax)
+	movq	-64(%rbp), %rax
+	movl	$0, 4(%rax)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__png_test
+	testl	%eax, %eax
+	je	.L66
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__png_load
+	jmp	.L67
+.L66:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__bmp_test
+	testl	%eax, %eax
+	je	.L68
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__bmp_load
+	jmp	.L67
+.L68:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__gif_test
+	testl	%eax, %eax
+	je	.L69
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__gif_load
+	jmp	.L67
+.L69:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__psd_test
+	testl	%eax, %eax
+	je	.L70
+	movq	-64(%rbp), %r9
+	movl	-52(%rbp), %r8d
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	subq	$8, %rsp
+	movl	16(%rbp), %edi
+	pushq	%rdi
+	movq	%rax, %rdi
+	call	stbi__psd_load
+	addq	$16, %rsp
+	jmp	.L67
+.L70:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pic_test
+	testl	%eax, %eax
+	je	.L71
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__pic_load
+	jmp	.L67
+.L71:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_test
+	testl	%eax, %eax
+	je	.L72
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__jpeg_load
+	jmp	.L67
+.L72:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pnm_test
+	testl	%eax, %eax
+	je	.L73
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__pnm_load
+	jmp	.L67
+.L73:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_test
+	testl	%eax, %eax
+	je	.L74
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__hdr_load
+	movq	%rax, -8(%rbp)
+	cmpl	$0, -52(%rbp)
+	jne	.L75
+	movq	-48(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L76
+.L75:
+	movl	-52(%rbp), %eax
+.L76:
+	movq	-40(%rbp), %rdx
+	movl	(%rdx), %edx
+	movq	-32(%rbp), %rcx
+	movl	(%rcx), %esi
+	movq	-8(%rbp), %rdi
+	movl	%eax, %ecx
+	call	stbi__hdr_to_ldr
+	jmp	.L67
+.L74:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__tga_test
+	testl	%eax, %eax
+	je	.L77
+	movq	-64(%rbp), %r8
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__tga_load
+	jmp	.L67
+.L77:
+	movl	$0, %eax
+.L67:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4903:
+	.size	stbi__load_main, .-stbi__load_main
+	.type	stbi__convert_16_to_8, @function
+stbi__convert_16_to_8:
+.LFB4904:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movl	%edx, -32(%rbp)
+	movl	%ecx, -36(%rbp)
+	movl	-28(%rbp), %eax
+	imull	-32(%rbp), %eax
+	movl	-36(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -12(%rbp)
+	movl	-12(%rbp), %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L79
+	movl	$0, %eax
+	jmp	.L80
+.L79:
+	movl	$0, -16(%rbp)
+	jmp	.L81
+.L82:
+	movl	-16(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	shrw	$8, %ax
+	movl	%eax, %ecx
+	movl	-16(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -16(%rbp)
+.L81:
+	movl	-16(%rbp), %eax
+	cmpl	-12(%rbp), %eax
+	jl	.L82
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L80:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4904:
+	.size	stbi__convert_16_to_8, .-stbi__convert_16_to_8
+	.type	stbi__convert_8_to_16, @function
+stbi__convert_8_to_16:
+.LFB4905:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movl	%edx, -32(%rbp)
+	movl	%ecx, -36(%rbp)
+	movl	-28(%rbp), %eax
+	imull	-32(%rbp), %eax
+	movl	-36(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -12(%rbp)
+	movl	-12(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L84
+	movl	$0, %eax
+	jmp	.L85
+.L84:
+	movl	$0, -16(%rbp)
+	jmp	.L86
+.L87:
+	movl	-16(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %esi
+	movl	-16(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movl	-16(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rsi,%rcx), %edx
+	movw	%dx, (%rax)
+	addl	$1, -16(%rbp)
+.L86:
+	movl	-16(%rbp), %eax
+	cmpl	-12(%rbp), %eax
+	jl	.L87
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L85:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4905:
+	.size	stbi__convert_8_to_16, .-stbi__convert_8_to_16
+	.type	stbi__vertical_flip, @function
+stbi__vertical_flip:
+.LFB4906:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$2160, %rsp
+	movq	%rdi, -2136(%rbp)
+	movl	%esi, -2140(%rbp)
+	movl	%edx, -2144(%rbp)
+	movl	%ecx, -2148(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-2140(%rbp), %eax
+	imull	-2148(%rbp), %eax
+	cltq
+	movq	%rax, -2088(%rbp)
+	movq	-2136(%rbp), %rax
+	movq	%rax, -2080(%rbp)
+	movl	$0, -2116(%rbp)
+	jmp	.L89
+.L92:
+	movl	-2116(%rbp), %eax
+	cltq
+	imulq	-2088(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-2080(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -2112(%rbp)
+	movl	-2144(%rbp), %eax
+	subl	-2116(%rbp), %eax
+	subl	$1, %eax
+	cltq
+	imulq	-2088(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-2080(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -2104(%rbp)
+	movq	-2088(%rbp), %rax
+	movq	%rax, -2096(%rbp)
+	jmp	.L90
+.L91:
+	movq	-2096(%rbp), %rax
+	movl	$2048, %edx
+	cmpq	%rdx, %rax
+	cmova	%rdx, %rax
+	movq	%rax, -2072(%rbp)
+	movq	-2072(%rbp), %rdx
+	movq	-2112(%rbp), %rcx
+	leaq	-2064(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movq	-2072(%rbp), %rdx
+	movq	-2104(%rbp), %rcx
+	movq	-2112(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movq	-2072(%rbp), %rdx
+	leaq	-2064(%rbp), %rcx
+	movq	-2104(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movq	-2072(%rbp), %rax
+	addq	%rax, -2112(%rbp)
+	movq	-2072(%rbp), %rax
+	addq	%rax, -2104(%rbp)
+	movq	-2072(%rbp), %rax
+	subq	%rax, -2096(%rbp)
+.L90:
+	cmpq	$0, -2096(%rbp)
+	jne	.L91
+	addl	$1, -2116(%rbp)
+.L89:
+	movl	-2144(%rbp), %eax
+	sarl	%eax
+	cmpl	%eax, -2116(%rbp)
+	jl	.L92
+	nop
+	movq	-8(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L93
+	call	__stack_chk_fail@PLT
+.L93:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4906:
+	.size	stbi__vertical_flip, .-stbi__vertical_flip
+	.type	stbi__vertical_flip_slices, @function
+stbi__vertical_flip_slices:
+.LFB4907:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movl	%edx, -32(%rbp)
+	movl	%ecx, -36(%rbp)
+	movl	%r8d, -40(%rbp)
+	movl	-28(%rbp), %eax
+	imull	-32(%rbp), %eax
+	movl	-40(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -12(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, -8(%rbp)
+	movl	$0, -16(%rbp)
+	jmp	.L95
+.L96:
+	movl	-40(%rbp), %ecx
+	movl	-32(%rbp), %edx
+	movl	-28(%rbp), %esi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__vertical_flip
+	movl	-12(%rbp), %eax
+	cltq
+	addq	%rax, -8(%rbp)
+	addl	$1, -16(%rbp)
+.L95:
+	movl	-16(%rbp), %eax
+	cmpl	-36(%rbp), %eax
+	jl	.L96
+	nop
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4907:
+	.size	stbi__vertical_flip_slices, .-stbi__vertical_flip_slices
+	.section	.rodata
+.LC0:
+	.string	"stb_image.h"
+	.align 8
+.LC1:
+	.string	"ri.bits_per_channel == 8 || ri.bits_per_channel == 16"
+	.text
+	.type	stbi__load_and_postprocess_8bit, @function
+stbi__load_and_postprocess_8bit:
+.LFB4908:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%rcx, -80(%rbp)
+	movl	%r8d, -84(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	leaq	-20(%rbp), %r8
+	movl	-84(%rbp), %edi
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	movq	-56(%rbp), %rax
+	subq	$8, %rsp
+	pushq	$8
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_main
+	addq	$16, %rsp
+	movq	%rax, -32(%rbp)
+	cmpq	$0, -32(%rbp)
+	jne	.L98
+	movl	$0, %eax
+	jmp	.L109
+.L98:
+	movl	-20(%rbp), %eax
+	cmpl	$8, %eax
+	je	.L100
+	movl	-20(%rbp), %eax
+	cmpl	$16, %eax
+	je	.L100
+	leaq	__PRETTY_FUNCTION__.20(%rip), %rax
+	movq	%rax, %rcx
+	movl	$1269, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC1(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L100:
+	movl	-20(%rbp), %eax
+	cmpl	$8, %eax
+	je	.L101
+	cmpl	$0, -84(%rbp)
+	jne	.L102
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L103
+.L102:
+	movl	-84(%rbp), %eax
+.L103:
+	movq	-72(%rbp), %rdx
+	movl	(%rdx), %edx
+	movq	-64(%rbp), %rcx
+	movl	(%rcx), %esi
+	movq	-32(%rbp), %rdi
+	movl	%eax, %ecx
+	call	stbi__convert_16_to_8
+	movq	%rax, -32(%rbp)
+	movl	$8, -20(%rbp)
+.L101:
+	movl	%fs:stbi__vertically_flip_on_load_set@tpoff, %eax
+	testl	%eax, %eax
+	je	.L104
+	movl	%fs:stbi__vertically_flip_on_load_local@tpoff, %eax
+	testl	%eax, %eax
+	setne	%al
+	jmp	.L105
+.L104:
+	movl	stbi__vertically_flip_on_load_global(%rip), %eax
+	testl	%eax, %eax
+	setne	%al
+.L105:
+	testb	%al, %al
+	je	.L106
+	cmpl	$0, -84(%rbp)
+	jne	.L107
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L108
+.L107:
+	movl	-84(%rbp), %eax
+.L108:
+	movl	%eax, -36(%rbp)
+	movq	-72(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-64(%rbp), %rax
+	movl	(%rax), %esi
+	movl	-36(%rbp), %ecx
+	movq	-32(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__vertical_flip
+.L106:
+	movq	-32(%rbp), %rax
+.L109:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L110
+	call	__stack_chk_fail@PLT
+.L110:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4908:
+	.size	stbi__load_and_postprocess_8bit, .-stbi__load_and_postprocess_8bit
+	.type	stbi__load_and_postprocess_16bit, @function
+stbi__load_and_postprocess_16bit:
+.LFB4909:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%rcx, -80(%rbp)
+	movl	%r8d, -84(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	leaq	-20(%rbp), %r8
+	movl	-84(%rbp), %edi
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	movq	-56(%rbp), %rax
+	subq	$8, %rsp
+	pushq	$16
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_main
+	addq	$16, %rsp
+	movq	%rax, -32(%rbp)
+	cmpq	$0, -32(%rbp)
+	jne	.L112
+	movl	$0, %eax
+	jmp	.L123
+.L112:
+	movl	-20(%rbp), %eax
+	cmpl	$8, %eax
+	je	.L114
+	movl	-20(%rbp), %eax
+	cmpl	$16, %eax
+	je	.L114
+	leaq	__PRETTY_FUNCTION__.19(%rip), %rax
+	movq	%rax, %rcx
+	movl	$1295, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC1(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L114:
+	movl	-20(%rbp), %eax
+	cmpl	$16, %eax
+	je	.L115
+	cmpl	$0, -84(%rbp)
+	jne	.L116
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L117
+.L116:
+	movl	-84(%rbp), %eax
+.L117:
+	movq	-72(%rbp), %rdx
+	movl	(%rdx), %edx
+	movq	-64(%rbp), %rcx
+	movl	(%rcx), %esi
+	movq	-32(%rbp), %rdi
+	movl	%eax, %ecx
+	call	stbi__convert_8_to_16
+	movq	%rax, -32(%rbp)
+	movl	$16, -20(%rbp)
+.L115:
+	movl	%fs:stbi__vertically_flip_on_load_set@tpoff, %eax
+	testl	%eax, %eax
+	je	.L118
+	movl	%fs:stbi__vertically_flip_on_load_local@tpoff, %eax
+	testl	%eax, %eax
+	setne	%al
+	jmp	.L119
+.L118:
+	movl	stbi__vertically_flip_on_load_global(%rip), %eax
+	testl	%eax, %eax
+	setne	%al
+.L119:
+	testb	%al, %al
+	je	.L120
+	cmpl	$0, -84(%rbp)
+	jne	.L121
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L122
+.L121:
+	movl	-84(%rbp), %eax
+.L122:
+	movl	%eax, -36(%rbp)
+	movl	-36(%rbp), %eax
+	cltq
+	addl	%eax, %eax
+	movl	%eax, %ecx
+	movq	-72(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-64(%rbp), %rax
+	movl	(%rax), %esi
+	movq	-32(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__vertical_flip
+.L120:
+	movq	-32(%rbp), %rax
+.L123:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L124
+	call	__stack_chk_fail@PLT
+.L124:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4909:
+	.size	stbi__load_and_postprocess_16bit, .-stbi__load_and_postprocess_16bit
+	.type	stbi__float_postprocess, @function
+stbi__float_postprocess:
+.LFB4910:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movl	%fs:stbi__vertically_flip_on_load_set@tpoff, %eax
+	testl	%eax, %eax
+	je	.L126
+	movl	%fs:stbi__vertically_flip_on_load_local@tpoff, %eax
+	testl	%eax, %eax
+	setne	%al
+	jmp	.L127
+.L126:
+	movl	stbi__vertically_flip_on_load_global(%rip), %eax
+	testl	%eax, %eax
+	setne	%al
+.L127:
+	testb	%al, %al
+	je	.L131
+	cmpq	$0, -24(%rbp)
+	je	.L131
+	cmpl	$0, -52(%rbp)
+	jne	.L129
+	movq	-48(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L130
+.L129:
+	movl	-52(%rbp), %eax
+.L130:
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	cltq
+	sall	$2, %eax
+	movl	%eax, %ecx
+	movq	-40(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	(%rax), %esi
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__vertical_flip
+.L131:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4910:
+	.size	stbi__float_postprocess, .-stbi__float_postprocess
+	.type	stbi__fopen, @function
+stbi__fopen:
+.LFB4911:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	-32(%rbp), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	fopen@PLT
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4911:
+	.size	stbi__fopen, .-stbi__fopen
+	.section	.rodata
+.LC2:
+	.string	"rb"
+	.text
+	.globl	stbi_load
+	.type	stbi_load, @function
+stbi_load:
+.LFB4912:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC2(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__fopen
+	movq	%rax, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	jne	.L135
+	movl	$0, %eax
+	jmp	.L136
+.L135:
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-16(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi_load_from_file
+	movq	%rax, -8(%rbp)
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	fclose@PLT
+	movq	-8(%rbp), %rax
+.L136:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4912:
+	.size	stbi_load, .-stbi_load
+	.globl	stbi_load_from_file
+	.type	stbi_load_from_file, @function
+stbi_load_from_file:
+.LFB4913:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$304, %rsp
+	movq	%rdi, -264(%rbp)
+	movq	%rsi, -272(%rbp)
+	movq	%rdx, -280(%rbp)
+	movq	%rcx, -288(%rbp)
+	movl	%r8d, -292(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-264(%rbp), %rdx
+	leaq	-240(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_file
+	movl	-292(%rbp), %edi
+	movq	-288(%rbp), %rcx
+	movq	-280(%rbp), %rdx
+	movq	-272(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_8bit
+	movq	%rax, -248(%rbp)
+	cmpq	$0, -248(%rbp)
+	je	.L138
+	movq	-40(%rbp), %rdx
+	movq	-48(%rbp), %rax
+	subq	%rax, %rdx
+	movl	%edx, %eax
+	negl	%eax
+	movslq	%eax, %rcx
+	movq	-264(%rbp), %rax
+	movl	$1, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	fseek@PLT
+.L138:
+	movq	-248(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L140
+	call	__stack_chk_fail@PLT
+.L140:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4913:
+	.size	stbi_load_from_file, .-stbi_load_from_file
+	.globl	stbi_load_from_file_16
+	.type	stbi_load_from_file_16, @function
+stbi_load_from_file_16:
+.LFB4914:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$304, %rsp
+	movq	%rdi, -264(%rbp)
+	movq	%rsi, -272(%rbp)
+	movq	%rdx, -280(%rbp)
+	movq	%rcx, -288(%rbp)
+	movl	%r8d, -292(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-264(%rbp), %rdx
+	leaq	-240(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_file
+	movl	-292(%rbp), %edi
+	movq	-288(%rbp), %rcx
+	movq	-280(%rbp), %rdx
+	movq	-272(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_16bit
+	movq	%rax, -248(%rbp)
+	cmpq	$0, -248(%rbp)
+	je	.L142
+	movq	-40(%rbp), %rdx
+	movq	-48(%rbp), %rax
+	subq	%rax, %rdx
+	movl	%edx, %eax
+	negl	%eax
+	movslq	%eax, %rcx
+	movq	-264(%rbp), %rax
+	movl	$1, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	fseek@PLT
+.L142:
+	movq	-248(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L144
+	call	__stack_chk_fail@PLT
+.L144:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4914:
+	.size	stbi_load_from_file_16, .-stbi_load_from_file_16
+	.globl	stbi_load_16
+	.type	stbi_load_16, @function
+stbi_load_16:
+.LFB4915:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC2(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__fopen
+	movq	%rax, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	jne	.L146
+	movl	$0, %eax
+	jmp	.L147
+.L146:
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-16(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi_load_from_file_16
+	movq	%rax, -8(%rbp)
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	fclose@PLT
+	movq	-8(%rbp), %rax
+.L147:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4915:
+	.size	stbi_load_16, .-stbi_load_16
+	.globl	stbi_load_16_from_memory
+	.type	stbi_load_16_from_memory, @function
+stbi_load_16_from_memory:
+.LFB4916:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movl	%esi, -252(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movl	%r9d, -256(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-252(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	movl	-256(%rbp), %edi
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_16bit
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L150
+	call	__stack_chk_fail@PLT
+.L150:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4916:
+	.size	stbi_load_16_from_memory, .-stbi_load_16_from_memory
+	.globl	stbi_load_16_from_callbacks
+	.type	stbi_load_16_from_callbacks, @function
+stbi_load_16_from_callbacks:
+.LFB4917:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movl	%r9d, -284(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-256(%rbp), %rdx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	movl	-284(%rbp), %edi
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_16bit
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L153
+	call	__stack_chk_fail@PLT
+.L153:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4917:
+	.size	stbi_load_16_from_callbacks, .-stbi_load_16_from_callbacks
+	.globl	stbi_load_from_memory
+	.type	stbi_load_from_memory, @function
+stbi_load_from_memory:
+.LFB4918:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movl	%esi, -252(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movl	%r9d, -256(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-252(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	movl	-256(%rbp), %edi
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_8bit
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L156
+	call	__stack_chk_fail@PLT
+.L156:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4918:
+	.size	stbi_load_from_memory, .-stbi_load_from_memory
+	.globl	stbi_load_from_callbacks
+	.type	stbi_load_from_callbacks, @function
+stbi_load_from_callbacks:
+.LFB4919:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movl	%r9d, -284(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-256(%rbp), %rdx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	movl	-284(%rbp), %edi
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_8bit
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L159
+	call	__stack_chk_fail@PLT
+.L159:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4919:
+	.size	stbi_load_from_callbacks, .-stbi_load_from_callbacks
+	.globl	stbi_load_gif_from_memory
+	.type	stbi_load_gif_from_memory, @function
+stbi_load_gif_from_memory:
+.LFB4920:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$320, %rsp
+	movq	%rdi, -264(%rbp)
+	movl	%esi, -268(%rbp)
+	movq	%rdx, -280(%rbp)
+	movq	%rcx, -288(%rbp)
+	movq	%r8, -296(%rbp)
+	movq	%r9, -304(%rbp)
+	movq	16(%rbp), %rax
+	movq	%rax, -312(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-268(%rbp), %edx
+	movq	-264(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	movq	-312(%rbp), %r9
+	movq	-304(%rbp), %r8
+	movq	-296(%rbp), %rcx
+	movq	-288(%rbp), %rdx
+	movq	-280(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	subq	$8, %rsp
+	movl	24(%rbp), %edi
+	pushq	%rdi
+	movq	%rax, %rdi
+	call	stbi__load_gif_main
+	addq	$16, %rsp
+	movq	%rax, -248(%rbp)
+	movl	%fs:stbi__vertically_flip_on_load_set@tpoff, %eax
+	testl	%eax, %eax
+	je	.L161
+	movl	%fs:stbi__vertically_flip_on_load_local@tpoff, %eax
+	testl	%eax, %eax
+	setne	%al
+	jmp	.L162
+.L161:
+	movl	stbi__vertically_flip_on_load_global(%rip), %eax
+	testl	%eax, %eax
+	setne	%al
+.L162:
+	testb	%al, %al
+	je	.L163
+	movq	-312(%rbp), %rax
+	movl	(%rax), %edi
+	movq	-304(%rbp), %rax
+	movl	(%rax), %ecx
+	movq	-296(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-288(%rbp), %rax
+	movl	(%rax), %esi
+	movq	-248(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__vertical_flip_slices
+.L163:
+	movq	-248(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L165
+	call	__stack_chk_fail@PLT
+.L165:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4920:
+	.size	stbi_load_gif_from_memory, .-stbi_load_gif_from_memory
+	.type	stbi__loadf_main, @function
+stbi__loadf_main:
+.LFB4921:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%rcx, -80(%rbp)
+	movl	%r8d, -84(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_test
+	testl	%eax, %eax
+	je	.L167
+	leaq	-20(%rbp), %r8
+	movl	-84(%rbp), %edi
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	movq	-56(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__hdr_load
+	movq	%rax, -32(%rbp)
+	cmpq	$0, -32(%rbp)
+	je	.L168
+	movl	-84(%rbp), %edi
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	movq	-32(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__float_postprocess
+.L168:
+	movq	-32(%rbp), %rax
+	jmp	.L169
+.L167:
+	movl	-84(%rbp), %edi
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	movq	-56(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__load_and_postprocess_8bit
+	movq	%rax, -40(%rbp)
+	cmpq	$0, -40(%rbp)
+	je	.L170
+	cmpl	$0, -84(%rbp)
+	jne	.L171
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	jmp	.L172
+.L171:
+	movl	-84(%rbp), %eax
+.L172:
+	movq	-72(%rbp), %rdx
+	movl	(%rdx), %edx
+	movq	-64(%rbp), %rcx
+	movl	(%rcx), %esi
+	movq	-40(%rbp), %rdi
+	movl	%eax, %ecx
+	call	stbi__ldr_to_hdr
+	jmp	.L169
+.L170:
+	movl	$0, %eax
+.L169:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L173
+	call	__stack_chk_fail@PLT
+.L173:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4921:
+	.size	stbi__loadf_main, .-stbi__loadf_main
+	.globl	stbi_loadf_from_memory
+	.type	stbi_loadf_from_memory, @function
+stbi_loadf_from_memory:
+.LFB4922:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movl	%esi, -252(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movl	%r9d, -256(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-252(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	movl	-256(%rbp), %edi
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__loadf_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L176
+	call	__stack_chk_fail@PLT
+.L176:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4922:
+	.size	stbi_loadf_from_memory, .-stbi_loadf_from_memory
+	.globl	stbi_loadf_from_callbacks
+	.type	stbi_loadf_from_callbacks, @function
+stbi_loadf_from_callbacks:
+.LFB4923:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movl	%r9d, -284(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-256(%rbp), %rdx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	movl	-284(%rbp), %edi
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__loadf_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L179
+	call	__stack_chk_fail@PLT
+.L179:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4923:
+	.size	stbi_loadf_from_callbacks, .-stbi_loadf_from_callbacks
+	.globl	stbi_loadf
+	.type	stbi_loadf, @function
+stbi_loadf:
+.LFB4924:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC2(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__fopen
+	movq	%rax, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	jne	.L181
+	movl	$0, %eax
+	jmp	.L182
+.L181:
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-16(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi_loadf_from_file
+	movq	%rax, -8(%rbp)
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	fclose@PLT
+	movq	-8(%rbp), %rax
+.L182:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4924:
+	.size	stbi_loadf, .-stbi_loadf
+	.globl	stbi_loadf_from_file
+	.type	stbi_loadf_from_file, @function
+stbi_loadf_from_file:
+.LFB4925:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movl	%r8d, -276(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-248(%rbp), %rdx
+	leaq	-240(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_file
+	movl	-276(%rbp), %edi
+	movq	-272(%rbp), %rcx
+	movq	-264(%rbp), %rdx
+	movq	-256(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__loadf_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L185
+	call	__stack_chk_fail@PLT
+.L185:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4925:
+	.size	stbi_loadf_from_file, .-stbi_loadf_from_file
+	.globl	stbi_is_hdr_from_memory
+	.type	stbi_is_hdr_from_memory, @function
+stbi_is_hdr_from_memory:
+.LFB4926:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$256, %rsp
+	movq	%rdi, -248(%rbp)
+	movl	%esi, -252(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-252(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_test
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L188
+	call	__stack_chk_fail@PLT
+.L188:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4926:
+	.size	stbi_is_hdr_from_memory, .-stbi_is_hdr_from_memory
+	.globl	stbi_is_hdr
+	.type	stbi_is_hdr, @function
+stbi_is_hdr:
+.LFB4927:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC2(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__fopen
+	movq	%rax, -8(%rbp)
+	movl	$0, -12(%rbp)
+	cmpq	$0, -8(%rbp)
+	je	.L190
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi_is_hdr_from_file
+	movl	%eax, -12(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	fclose@PLT
+.L190:
+	movl	-12(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4927:
+	.size	stbi_is_hdr, .-stbi_is_hdr
+	.globl	stbi_is_hdr_from_file
+	.type	stbi_is_hdr_from_file, @function
+stbi_is_hdr_from_file:
+.LFB4928:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$272, %rsp
+	movq	%rdi, -264(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-264(%rbp), %rax
+	movq	%rax, %rdi
+	call	ftell@PLT
+	movq	%rax, -248(%rbp)
+	movq	-264(%rbp), %rdx
+	leaq	-240(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_file
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_test
+	movl	%eax, -252(%rbp)
+	movq	-248(%rbp), %rcx
+	movq	-264(%rbp), %rax
+	movl	$0, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	fseek@PLT
+	movl	-252(%rbp), %eax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L194
+	call	__stack_chk_fail@PLT
+.L194:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4928:
+	.size	stbi_is_hdr_from_file, .-stbi_is_hdr_from_file
+	.globl	stbi_is_hdr_from_callbacks
+	.type	stbi_is_hdr_from_callbacks, @function
+stbi_is_hdr_from_callbacks:
+.LFB4929:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$256, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-256(%rbp), %rdx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_test
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L197
+	call	__stack_chk_fail@PLT
+.L197:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4929:
+	.size	stbi_is_hdr_from_callbacks, .-stbi_is_hdr_from_callbacks
+	.data
+	.align 4
+	.type	stbi__l2h_gamma, @object
+	.size	stbi__l2h_gamma, 4
+stbi__l2h_gamma:
+	.long	1074580685
+	.align 4
+	.type	stbi__l2h_scale, @object
+	.size	stbi__l2h_scale, 4
+stbi__l2h_scale:
+	.long	1065353216
+	.text
+	.globl	stbi_ldr_to_hdr_gamma
+	.type	stbi_ldr_to_hdr_gamma, @function
+stbi_ldr_to_hdr_gamma:
+.LFB4930:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movss	%xmm0, -4(%rbp)
+	movss	-4(%rbp), %xmm0
+	movss	%xmm0, stbi__l2h_gamma(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4930:
+	.size	stbi_ldr_to_hdr_gamma, .-stbi_ldr_to_hdr_gamma
+	.globl	stbi_ldr_to_hdr_scale
+	.type	stbi_ldr_to_hdr_scale, @function
+stbi_ldr_to_hdr_scale:
+.LFB4931:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movss	%xmm0, -4(%rbp)
+	movss	-4(%rbp), %xmm0
+	movss	%xmm0, stbi__l2h_scale(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4931:
+	.size	stbi_ldr_to_hdr_scale, .-stbi_ldr_to_hdr_scale
+	.data
+	.align 4
+	.type	stbi__h2l_gamma_i, @object
+	.size	stbi__h2l_gamma_i, 4
+stbi__h2l_gamma_i:
+	.long	1055439406
+	.align 4
+	.type	stbi__h2l_scale_i, @object
+	.size	stbi__h2l_scale_i, 4
+stbi__h2l_scale_i:
+	.long	1065353216
+	.text
+	.globl	stbi_hdr_to_ldr_gamma
+	.type	stbi_hdr_to_ldr_gamma, @function
+stbi_hdr_to_ldr_gamma:
+.LFB4932:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movss	%xmm0, -4(%rbp)
+	movss	.LC3(%rip), %xmm0
+	divss	-4(%rbp), %xmm0
+	movss	%xmm0, stbi__h2l_gamma_i(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4932:
+	.size	stbi_hdr_to_ldr_gamma, .-stbi_hdr_to_ldr_gamma
+	.globl	stbi_hdr_to_ldr_scale
+	.type	stbi_hdr_to_ldr_scale, @function
+stbi_hdr_to_ldr_scale:
+.LFB4933:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movss	%xmm0, -4(%rbp)
+	movss	.LC3(%rip), %xmm0
+	divss	-4(%rbp), %xmm0
+	movss	%xmm0, stbi__h2l_scale_i(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4933:
+	.size	stbi_hdr_to_ldr_scale, .-stbi_hdr_to_ldr_scale
+	.type	stbi__refill_buffer, @function
+stbi__refill_buffer:
+.LFB4934:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	16(%rax), %rcx
+	movq	-24(%rbp), %rax
+	movl	52(%rax), %edx
+	movq	-24(%rbp), %rax
+	leaq	56(%rax), %rsi
+	movq	-24(%rbp), %rax
+	movq	40(%rax), %rax
+	movq	%rax, %rdi
+	call	*%rcx
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	184(%rax), %edx
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rcx
+	movq	-24(%rbp), %rax
+	movq	208(%rax), %rax
+	subq	%rax, %rcx
+	movl	%ecx, %eax
+	addl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 184(%rax)
+	cmpl	$0, -4(%rbp)
+	jne	.L203
+	movq	-24(%rbp), %rax
+	movl	$0, 48(%rax)
+	movq	-24(%rbp), %rax
+	leaq	56(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movq	-24(%rbp), %rax
+	addq	$56, %rax
+	leaq	1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 200(%rax)
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rax
+	movb	$0, (%rax)
+	jmp	.L205
+.L203:
+	movq	-24(%rbp), %rax
+	leaq	56(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movq	-24(%rbp), %rax
+	leaq	56(%rax), %rdx
+	movl	-4(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 200(%rax)
+.L205:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4934:
+	.size	stbi__refill_buffer, .-stbi__refill_buffer
+	.type	stbi__get8, @function
+stbi__get8:
+.LFB4935:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	192(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	200(%rax), %rax
+	cmpq	%rax, %rdx
+	jnb	.L207
+	movq	-8(%rbp), %rax
+	movq	192(%rax), %rax
+	leaq	1(%rax), %rcx
+	movq	-8(%rbp), %rdx
+	movq	%rcx, 192(%rdx)
+	movzbl	(%rax), %eax
+	jmp	.L208
+.L207:
+	movq	-8(%rbp), %rax
+	movl	48(%rax), %eax
+	testl	%eax, %eax
+	je	.L209
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__refill_buffer
+	movq	-8(%rbp), %rax
+	movq	192(%rax), %rax
+	leaq	1(%rax), %rcx
+	movq	-8(%rbp), %rdx
+	movq	%rcx, 192(%rdx)
+	movzbl	(%rax), %eax
+	jmp	.L208
+.L209:
+	movl	$0, %eax
+.L208:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4935:
+	.size	stbi__get8, .-stbi__get8
+	.type	stbi__at_eof, @function
+stbi__at_eof:
+.LFB4936:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	16(%rax), %rax
+	testq	%rax, %rax
+	je	.L211
+	movq	-8(%rbp), %rax
+	movq	32(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	40(%rax), %rax
+	movq	%rax, %rdi
+	call	*%rdx
+	testl	%eax, %eax
+	jne	.L212
+	movl	$0, %eax
+	jmp	.L213
+.L212:
+	movq	-8(%rbp), %rax
+	movl	48(%rax), %eax
+	testl	%eax, %eax
+	jne	.L211
+	movl	$1, %eax
+	jmp	.L213
+.L211:
+	movq	-8(%rbp), %rax
+	movq	192(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	200(%rax), %rax
+	cmpq	%rax, %rdx
+	setnb	%al
+	movzbl	%al, %eax
+.L213:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4936:
+	.size	stbi__at_eof, .-stbi__at_eof
+	.type	stbi__skip, @function
+stbi__skip:
+.LFB4937:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	cmpl	$0, -28(%rbp)
+	je	.L219
+	cmpl	$0, -28(%rbp)
+	jns	.L217
+	movq	-24(%rbp), %rax
+	movq	200(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	jmp	.L214
+.L217:
+	movq	-24(%rbp), %rax
+	movq	16(%rax), %rax
+	testq	%rax, %rax
+	je	.L218
+	movq	-24(%rbp), %rax
+	movq	200(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rax
+	subq	%rax, %rdx
+	movl	%edx, -4(%rbp)
+	movl	-4(%rbp), %eax
+	cmpl	-28(%rbp), %eax
+	jge	.L218
+	movq	-24(%rbp), %rax
+	movq	200(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movq	-24(%rbp), %rax
+	movq	24(%rax), %rcx
+	movl	-28(%rbp), %eax
+	subl	-4(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movq	40(%rax), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	*%rcx
+	jmp	.L214
+.L218:
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rdx
+	movl	-28(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	jmp	.L214
+.L219:
+	nop
+.L214:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4937:
+	.size	stbi__skip, .-stbi__skip
+	.type	stbi__getn, @function
+stbi__getn:
+.LFB4938:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	%edx, -36(%rbp)
+	movq	-24(%rbp), %rax
+	movq	16(%rax), %rax
+	testq	%rax, %rax
+	je	.L221
+	movq	-24(%rbp), %rax
+	movq	200(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rax
+	subq	%rax, %rdx
+	movl	%edx, -12(%rbp)
+	movl	-12(%rbp), %eax
+	cmpl	-36(%rbp), %eax
+	jge	.L221
+	movl	-12(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rcx
+	movq	-32(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movq	-24(%rbp), %rax
+	movq	16(%rax), %rcx
+	movl	-36(%rbp), %eax
+	subl	-12(%rbp), %eax
+	movl	%eax, %edx
+	movl	-12(%rbp), %eax
+	movslq	%eax, %rsi
+	movq	-32(%rbp), %rax
+	addq	%rax, %rsi
+	movq	-24(%rbp), %rax
+	movq	40(%rax), %rax
+	movq	%rax, %rdi
+	call	*%rcx
+	movl	%eax, -8(%rbp)
+	movl	-36(%rbp), %eax
+	subl	-12(%rbp), %eax
+	cmpl	%eax, -8(%rbp)
+	sete	%al
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	200(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movl	-4(%rbp), %eax
+	jmp	.L222
+.L221:
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rdx
+	movl	-36(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-24(%rbp), %rax
+	movq	200(%rax), %rax
+	cmpq	%rdx, %rax
+	jb	.L223
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rcx
+	movq	-32(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movq	-24(%rbp), %rax
+	movq	192(%rax), %rdx
+	movl	-36(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, 192(%rax)
+	movl	$1, %eax
+	jmp	.L222
+.L223:
+	movl	$0, %eax
+.L222:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4938:
+	.size	stbi__getn, .-stbi__getn
+	.type	stbi__get16be, @function
+stbi__get16be:
+.LFB4939:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$40, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -20(%rbp)
+	movl	-20(%rbp), %eax
+	sall	$8, %eax
+	movl	%eax, %ebx
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	addl	%ebx, %eax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4939:
+	.size	stbi__get16be, .-stbi__get16be
+	.type	stbi__get32be, @function
+stbi__get32be:
+.LFB4940:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$40, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -20(%rbp)
+	movl	-20(%rbp), %eax
+	sall	$16, %eax
+	movl	%eax, %ebx
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	addl	%ebx, %eax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4940:
+	.size	stbi__get32be, .-stbi__get32be
+	.type	stbi__get16le, @function
+stbi__get16le:
+.LFB4941:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4941:
+	.size	stbi__get16le, .-stbi__get16le
+	.type	stbi__get32le, @function
+stbi__get32le:
+.LFB4942:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	sall	$16, %eax
+	addl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4942:
+	.size	stbi__get32le, .-stbi__get32le
+	.type	stbi__compute_y, @function
+stbi__compute_y:
+.LFB4943:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	-4(%rbp), %eax
+	imull	$77, %eax, %edx
+	movl	-8(%rbp), %eax
+	imull	$150, %eax, %eax
+	addl	%eax, %edx
+	movl	-12(%rbp), %eax
+	imull	$29, %eax, %eax
+	addl	%edx, %eax
+	sarl	$8, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4943:
+	.size	stbi__compute_y, .-stbi__compute_y
+	.section	.rodata
+	.align 8
+.LC4:
+	.string	"req_comp >= 1 && req_comp <= 4"
+.LC5:
+	.string	"0"
+	.text
+	.type	stbi__convert_format, @function
+stbi__convert_format:
+.LFB4944:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movl	%esi, -44(%rbp)
+	movl	%edx, -48(%rbp)
+	movl	%ecx, -52(%rbp)
+	movl	%r8d, -56(%rbp)
+	movl	-48(%rbp), %eax
+	cmpl	-44(%rbp), %eax
+	jne	.L235
+	movq	-40(%rbp), %rax
+	jmp	.L236
+.L235:
+	cmpl	$0, -48(%rbp)
+	jle	.L237
+	cmpl	$4, -48(%rbp)
+	jle	.L282
+.L237:
+	leaq	__PRETTY_FUNCTION__.18(%rip), %rax
+	movq	%rax, %rcx
+	movl	$1761, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC4(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L282:
+	movl	-56(%rbp), %edx
+	movl	-52(%rbp), %esi
+	movl	-48(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L239
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L236
+.L239:
+	movl	$0, -28(%rbp)
+	jmp	.L240
+.L280:
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-44(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -24(%rbp)
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-48(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -16(%rbp)
+	movl	-44(%rbp), %eax
+	leal	0(,%rax,8), %edx
+	movl	-48(%rbp), %eax
+	addl	%edx, %eax
+	subl	$10, %eax
+	cmpl	$25, %eax
+	ja	.L281
+	movl	%eax, %eax
+	leaq	0(,%rax,4), %rdx
+	leaq	.L243(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	cltq
+	leaq	.L243(%rip), %rdx
+	addq	%rdx, %rax
+	jmp	*%rax
+	.section	.rodata
+	.align 4
+	.align 4
+.L243:
+	.long	.L254-.L243
+	.long	.L253-.L243
+	.long	.L252-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L251-.L243
+	.long	.L281-.L243
+	.long	.L250-.L243
+	.long	.L249-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L248-.L243
+	.long	.L247-.L243
+	.long	.L281-.L243
+	.long	.L246-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L281-.L243
+	.long	.L245-.L243
+	.long	.L244-.L243
+	.long	.L242-.L243
+	.text
+.L254:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L255
+.L256:
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movb	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$1, -24(%rbp)
+	addq	$2, -16(%rbp)
+.L255:
+	cmpl	$0, -32(%rbp)
+	jns	.L256
+	jmp	.L257
+.L253:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L258
+.L259:
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rdx), %edx
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$1, -24(%rbp)
+	addq	$3, -16(%rbp)
+.L258:
+	cmpl	$0, -32(%rbp)
+	jns	.L259
+	jmp	.L257
+.L252:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L260
+.L261:
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rdx), %edx
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$1, -24(%rbp)
+	addq	$4, -16(%rbp)
+.L260:
+	cmpl	$0, -32(%rbp)
+	jns	.L261
+	jmp	.L257
+.L251:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L262
+.L263:
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$2, -24(%rbp)
+	addq	$1, -16(%rbp)
+.L262:
+	cmpl	$0, -32(%rbp)
+	jns	.L263
+	jmp	.L257
+.L250:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L264
+.L265:
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rdx), %edx
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$2, -24(%rbp)
+	addq	$3, -16(%rbp)
+.L264:
+	cmpl	$0, -32(%rbp)
+	jns	.L265
+	jmp	.L257
+.L249:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L266
+.L267:
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rdx), %edx
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	3(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	1(%rax), %eax
+	movb	%al, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$2, -24(%rbp)
+	addq	$4, -16(%rbp)
+.L266:
+	cmpl	$0, -32(%rbp)
+	jns	.L267
+	jmp	.L257
+.L246:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L268
+.L269:
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	1(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$3, -24(%rbp)
+	addq	$4, -16(%rbp)
+.L268:
+	cmpl	$0, -32(%rbp)
+	jns	.L269
+	jmp	.L257
+.L248:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L270
+.L271:
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movq	-16(%rbp), %rdx
+	movb	%al, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$3, -24(%rbp)
+	addq	$1, -16(%rbp)
+.L270:
+	cmpl	$0, -32(%rbp)
+	jns	.L271
+	jmp	.L257
+.L247:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L272
+.L273:
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movq	-16(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movb	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$3, -24(%rbp)
+	addq	$2, -16(%rbp)
+.L272:
+	cmpl	$0, -32(%rbp)
+	jns	.L273
+	jmp	.L257
+.L245:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L274
+.L275:
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movq	-16(%rbp), %rdx
+	movb	%al, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$4, -24(%rbp)
+	addq	$1, -16(%rbp)
+.L274:
+	cmpl	$0, -32(%rbp)
+	jns	.L275
+	jmp	.L257
+.L244:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L276
+.L277:
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movq	-16(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	3(%rax), %eax
+	movb	%al, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$4, -24(%rbp)
+	addq	$2, -16(%rbp)
+.L276:
+	cmpl	$0, -32(%rbp)
+	jns	.L277
+	jmp	.L257
+.L242:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L278
+.L279:
+	movq	-24(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	1(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movb	%al, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$4, -24(%rbp)
+	addq	$3, -16(%rbp)
+.L278:
+	cmpl	$0, -32(%rbp)
+	jns	.L279
+	jmp	.L257
+.L281:
+	leaq	__PRETTY_FUNCTION__.18(%rip), %rax
+	movq	%rax, %rcx
+	movl	$1790, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC5(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L257:
+	addl	$1, -28(%rbp)
+.L240:
+	movl	-56(%rbp), %eax
+	cmpl	%eax, -28(%rbp)
+	jl	.L280
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L236:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4944:
+	.size	stbi__convert_format, .-stbi__convert_format
+	.type	stbi__compute_y_16, @function
+stbi__compute_y_16:
+.LFB4945:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	movl	-4(%rbp), %eax
+	imull	$77, %eax, %edx
+	movl	-8(%rbp), %eax
+	imull	$150, %eax, %eax
+	addl	%eax, %edx
+	movl	-12(%rbp), %eax
+	imull	$29, %eax, %eax
+	addl	%edx, %eax
+	sarl	$8, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4945:
+	.size	stbi__compute_y_16, .-stbi__compute_y_16
+	.type	stbi__convert_format16, @function
+stbi__convert_format16:
+.LFB4946:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movl	%esi, -44(%rbp)
+	movl	%edx, -48(%rbp)
+	movl	%ecx, -52(%rbp)
+	movl	%r8d, -56(%rbp)
+	movl	-48(%rbp), %eax
+	cmpl	-44(%rbp), %eax
+	jne	.L286
+	movq	-40(%rbp), %rax
+	jmp	.L287
+.L286:
+	cmpl	$0, -48(%rbp)
+	jle	.L288
+	cmpl	$4, -48(%rbp)
+	jle	.L333
+.L288:
+	leaq	__PRETTY_FUNCTION__.17(%rip), %rax
+	movq	%rax, %rcx
+	movl	$1818, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC4(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L333:
+	movl	-48(%rbp), %eax
+	imull	-52(%rbp), %eax
+	imull	-56(%rbp), %eax
+	addl	%eax, %eax
+	movl	%eax, %eax
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L290
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L287
+.L290:
+	movl	$0, -28(%rbp)
+	jmp	.L291
+.L331:
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-44(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -24(%rbp)
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-48(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -16(%rbp)
+	movl	-44(%rbp), %eax
+	leal	0(,%rax,8), %edx
+	movl	-48(%rbp), %eax
+	addl	%edx, %eax
+	subl	$10, %eax
+	cmpl	$25, %eax
+	ja	.L332
+	movl	%eax, %eax
+	leaq	0(,%rax,4), %rdx
+	leaq	.L294(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	cltq
+	leaq	.L294(%rip), %rdx
+	addq	%rdx, %rax
+	jmp	*%rax
+	.section	.rodata
+	.align 4
+	.align 4
+.L294:
+	.long	.L305-.L294
+	.long	.L304-.L294
+	.long	.L303-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L302-.L294
+	.long	.L332-.L294
+	.long	.L301-.L294
+	.long	.L300-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L299-.L294
+	.long	.L298-.L294
+	.long	.L332-.L294
+	.long	.L297-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L332-.L294
+	.long	.L296-.L294
+	.long	.L295-.L294
+	.long	.L293-.L294
+	.text
+.L305:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L306
+.L307:
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movw	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$2, -24(%rbp)
+	addq	$4, -16(%rbp)
+.L306:
+	cmpl	$0, -32(%rbp)
+	jns	.L307
+	jmp	.L308
+.L304:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L309
+.L310:
+	movq	-16(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rdx), %edx
+	movw	%dx, (%rax)
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$2, -24(%rbp)
+	addq	$6, -16(%rbp)
+.L309:
+	cmpl	$0, -32(%rbp)
+	jns	.L310
+	jmp	.L308
+.L303:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L311
+.L312:
+	movq	-16(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rdx), %edx
+	movw	%dx, (%rax)
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$6, %rax
+	movw	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$2, -24(%rbp)
+	addq	$8, -16(%rbp)
+.L311:
+	cmpl	$0, -32(%rbp)
+	jns	.L312
+	jmp	.L308
+.L302:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L313
+.L314:
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$4, -24(%rbp)
+	addq	$2, -16(%rbp)
+.L313:
+	cmpl	$0, -32(%rbp)
+	jns	.L314
+	jmp	.L308
+.L301:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L315
+.L316:
+	movq	-16(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rdx), %edx
+	movw	%dx, (%rax)
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$4, -24(%rbp)
+	addq	$6, -16(%rbp)
+.L315:
+	cmpl	$0, -32(%rbp)
+	jns	.L316
+	jmp	.L308
+.L300:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L317
+.L318:
+	movq	-16(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rdx), %edx
+	movw	%dx, (%rax)
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	6(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	2(%rax), %eax
+	movw	%ax, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$4, -24(%rbp)
+	addq	$8, -16(%rbp)
+.L317:
+	cmpl	$0, -32(%rbp)
+	jns	.L318
+	jmp	.L308
+.L297:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L319
+.L320:
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	2(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	4(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$6, %rax
+	movw	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$6, -24(%rbp)
+	addq	$8, -16(%rbp)
+.L319:
+	cmpl	$0, -32(%rbp)
+	jns	.L320
+	jmp	.L308
+.L299:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L321
+.L322:
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %edx
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %ecx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y_16
+	movq	-16(%rbp), %rdx
+	movw	%ax, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$6, -24(%rbp)
+	addq	$2, -16(%rbp)
+.L321:
+	cmpl	$0, -32(%rbp)
+	jns	.L322
+	jmp	.L308
+.L298:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L323
+.L324:
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %edx
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %ecx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y_16
+	movq	-16(%rbp), %rdx
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movw	$-1, (%rax)
+	subl	$1, -32(%rbp)
+	addq	$6, -24(%rbp)
+	addq	$4, -16(%rbp)
+.L323:
+	cmpl	$0, -32(%rbp)
+	jns	.L324
+	jmp	.L308
+.L296:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L325
+.L326:
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %edx
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %ecx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y_16
+	movq	-16(%rbp), %rdx
+	movw	%ax, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$8, -24(%rbp)
+	addq	$2, -16(%rbp)
+.L325:
+	cmpl	$0, -32(%rbp)
+	jns	.L326
+	jmp	.L308
+.L295:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L327
+.L328:
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %edx
+	movq	-24(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %ecx
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y_16
+	movq	-16(%rbp), %rdx
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	6(%rax), %eax
+	movw	%ax, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$8, -24(%rbp)
+	addq	$4, -16(%rbp)
+.L327:
+	cmpl	$0, -32(%rbp)
+	jns	.L328
+	jmp	.L308
+.L293:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -32(%rbp)
+	jmp	.L329
+.L330:
+	movq	-24(%rbp), %rax
+	movzwl	(%rax), %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	2(%rax), %eax
+	movw	%ax, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rax
+	movzwl	4(%rax), %eax
+	movw	%ax, (%rdx)
+	subl	$1, -32(%rbp)
+	addq	$8, -24(%rbp)
+	addq	$6, -16(%rbp)
+.L329:
+	cmpl	$0, -32(%rbp)
+	jns	.L330
+	jmp	.L308
+.L332:
+	leaq	__PRETTY_FUNCTION__.17(%rip), %rax
+	movq	%rax, %rcx
+	movl	$1847, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC5(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L308:
+	addl	$1, -28(%rbp)
+.L291:
+	movl	-56(%rbp), %eax
+	cmpl	%eax, -28(%rbp)
+	jl	.L331
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L287:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4946:
+	.size	stbi__convert_format16, .-stbi__convert_format16
+	.type	stbi__ldr_to_hdr, @function
+stbi__ldr_to_hdr:
+.LFB4947:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movl	%esi, -44(%rbp)
+	movl	%edx, -48(%rbp)
+	movl	%ecx, -52(%rbp)
+	cmpq	$0, -40(%rbp)
+	jne	.L335
+	movl	$0, %eax
+	jmp	.L336
+.L335:
+	movl	-52(%rbp), %edx
+	movl	-48(%rbp), %esi
+	movl	-44(%rbp), %eax
+	movl	$0, %r8d
+	movl	$4, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad4
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L337
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L336
+.L337:
+	movl	-52(%rbp), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	je	.L338
+	movl	-52(%rbp), %eax
+	movl	%eax, -12(%rbp)
+	jmp	.L339
+.L338:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -12(%rbp)
+.L339:
+	movl	$0, -20(%rbp)
+	jmp	.L340
+.L343:
+	movl	$0, -16(%rbp)
+	jmp	.L341
+.L342:
+	movss	stbi__l2h_gamma(%rip), %xmm0
+	pxor	%xmm2, %xmm2
+	cvtss2sd	%xmm0, %xmm2
+	movl	-20(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-16(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	movss	.LC6(%rip), %xmm1
+	divss	%xmm1, %xmm0
+	pxor	%xmm3, %xmm3
+	cvtss2sd	%xmm0, %xmm3
+	movq	%xmm3, %rax
+	movapd	%xmm2, %xmm1
+	movq	%rax, %xmm0
+	call	pow@PLT
+	movss	stbi__l2h_scale(%rip), %xmm1
+	cvtss2sd	%xmm1, %xmm1
+	mulsd	%xmm1, %xmm0
+	movl	-20(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-16(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	cvtsd2ss	%xmm0, %xmm0
+	movss	%xmm0, (%rax)
+	addl	$1, -16(%rbp)
+.L341:
+	movl	-16(%rbp), %eax
+	cmpl	-12(%rbp), %eax
+	jl	.L342
+	addl	$1, -20(%rbp)
+.L340:
+	movl	-44(%rbp), %eax
+	imull	-48(%rbp), %eax
+	cmpl	%eax, -20(%rbp)
+	jl	.L343
+	movl	-12(%rbp), %eax
+	cmpl	-52(%rbp), %eax
+	jge	.L344
+	movl	$0, -20(%rbp)
+	jmp	.L345
+.L346:
+	movl	-20(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	movl	-20(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movss	.LC6(%rip), %xmm1
+	divss	%xmm1, %xmm0
+	movss	%xmm0, (%rax)
+	addl	$1, -20(%rbp)
+.L345:
+	movl	-44(%rbp), %eax
+	imull	-48(%rbp), %eax
+	cmpl	%eax, -20(%rbp)
+	jl	.L346
+.L344:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L336:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4947:
+	.size	stbi__ldr_to_hdr, .-stbi__ldr_to_hdr
+	.type	stbi__hdr_to_ldr, @function
+stbi__hdr_to_ldr:
+.LFB4948:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movl	%esi, -44(%rbp)
+	movl	%edx, -48(%rbp)
+	movl	%ecx, -52(%rbp)
+	cmpq	$0, -40(%rbp)
+	jne	.L348
+	movl	$0, %eax
+	jmp	.L349
+.L348:
+	movl	-52(%rbp), %edx
+	movl	-48(%rbp), %esi
+	movl	-44(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L350
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L349
+.L350:
+	movl	-52(%rbp), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	je	.L351
+	movl	-52(%rbp), %eax
+	movl	%eax, -20(%rbp)
+	jmp	.L352
+.L351:
+	movl	-52(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -20(%rbp)
+.L352:
+	movl	$0, -28(%rbp)
+	jmp	.L353
+.L365:
+	movl	$0, -24(%rbp)
+	jmp	.L354
+.L359:
+	movss	stbi__h2l_gamma_i(%rip), %xmm0
+	pxor	%xmm2, %xmm2
+	cvtss2sd	%xmm0, %xmm2
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-24(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movss	(%rax), %xmm1
+	movss	stbi__h2l_scale_i(%rip), %xmm0
+	mulss	%xmm1, %xmm0
+	pxor	%xmm3, %xmm3
+	cvtss2sd	%xmm0, %xmm3
+	movq	%xmm3, %rax
+	movapd	%xmm2, %xmm1
+	movq	%rax, %xmm0
+	call	pow@PLT
+	pxor	%xmm1, %xmm1
+	cvtsd2ss	%xmm0, %xmm1
+	movss	.LC6(%rip), %xmm0
+	mulss	%xmm0, %xmm1
+	movss	.LC7(%rip), %xmm0
+	addss	%xmm1, %xmm0
+	movss	%xmm0, -16(%rbp)
+	pxor	%xmm0, %xmm0
+	comiss	-16(%rbp), %xmm0
+	jbe	.L355
+	pxor	%xmm0, %xmm0
+	movss	%xmm0, -16(%rbp)
+.L355:
+	movss	-16(%rbp), %xmm0
+	comiss	.LC6(%rip), %xmm0
+	jbe	.L357
+	movss	.LC6(%rip), %xmm0
+	movss	%xmm0, -16(%rbp)
+.L357:
+	movss	-16(%rbp), %xmm0
+	cvttss2sil	%xmm0, %ecx
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-24(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -24(%rbp)
+.L354:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jl	.L359
+	movl	-24(%rbp), %eax
+	cmpl	-52(%rbp), %eax
+	jge	.L360
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-24(%rbp), %eax
+	addl	%edx, %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movss	(%rax), %xmm1
+	movss	.LC6(%rip), %xmm0
+	mulss	%xmm0, %xmm1
+	movss	.LC7(%rip), %xmm0
+	addss	%xmm1, %xmm0
+	movss	%xmm0, -12(%rbp)
+	pxor	%xmm0, %xmm0
+	comiss	-12(%rbp), %xmm0
+	jbe	.L361
+	pxor	%xmm0, %xmm0
+	movss	%xmm0, -12(%rbp)
+.L361:
+	movss	-12(%rbp), %xmm0
+	comiss	.LC6(%rip), %xmm0
+	jbe	.L363
+	movss	.LC6(%rip), %xmm0
+	movss	%xmm0, -12(%rbp)
+.L363:
+	movss	-12(%rbp), %xmm0
+	cvttss2sil	%xmm0, %ecx
+	movl	-28(%rbp), %eax
+	imull	-52(%rbp), %eax
+	movl	%eax, %edx
+	movl	-24(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+.L360:
+	addl	$1, -28(%rbp)
+.L353:
+	movl	-44(%rbp), %eax
+	imull	-48(%rbp), %eax
+	cmpl	%eax, -28(%rbp)
+	jl	.L365
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L349:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4948:
+	.size	stbi__hdr_to_ldr, .-stbi__hdr_to_ldr
+	.section	.rodata
+.LC9:
+	.string	"bad size list"
+.LC10:
+	.string	"bad code lengths"
+	.text
+	.type	stbi__build_huffman, @function
+stbi__build_huffman:
+.LFB4949:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movl	$0, -20(%rbp)
+	movl	$0, -28(%rbp)
+	jmp	.L371
+.L376:
+	movl	$0, -24(%rbp)
+	jmp	.L372
+.L375:
+	movl	-28(%rbp), %eax
+	movl	%eax, %ecx
+	movl	-20(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -20(%rbp)
+	addl	$1, %ecx
+	movq	-40(%rbp), %rdx
+	cltq
+	movb	%cl, 1280(%rdx,%rax)
+	cmpl	$256, -20(%rbp)
+	jle	.L373
+	leaq	.LC9(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L374
+.L373:
+	addl	$1, -24(%rbp)
+.L372:
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-48(%rbp), %rax
+	addq	%rdx, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -24(%rbp)
+	jl	.L375
+	addl	$1, -28(%rbp)
+.L371:
+	cmpl	$15, -28(%rbp)
+	jle	.L376
+	movq	-40(%rbp), %rdx
+	movl	-20(%rbp), %eax
+	cltq
+	movb	$0, 1280(%rdx,%rax)
+	movl	$0, -16(%rbp)
+	movl	$0, -20(%rbp)
+	movl	$1, -24(%rbp)
+	jmp	.L377
+.L381:
+	movl	-20(%rbp), %eax
+	subl	-16(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-40(%rbp), %rax
+	movl	-24(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$400, %rdx
+	movl	%ecx, 12(%rax,%rdx,4)
+	movq	-40(%rbp), %rdx
+	movl	-20(%rbp), %eax
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	cmpl	%eax, -24(%rbp)
+	jne	.L378
+	jmp	.L379
+.L380:
+	movl	-16(%rbp), %edx
+	leal	1(%rdx), %eax
+	movl	%eax, -16(%rbp)
+	movl	-20(%rbp), %eax
+	leal	1(%rax), %ecx
+	movl	%ecx, -20(%rbp)
+	movl	%edx, %ecx
+	movq	-40(%rbp), %rdx
+	cltq
+	addq	$256, %rax
+	movw	%cx, (%rdx,%rax,2)
+.L379:
+	movq	-40(%rbp), %rdx
+	movl	-20(%rbp), %eax
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	cmpl	%eax, -24(%rbp)
+	je	.L380
+	movl	-16(%rbp), %eax
+	leal	-1(%rax), %edx
+	movl	-24(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, %edx
+	movl	%edx, %eax
+	testl	%eax, %eax
+	je	.L378
+	leaq	.LC10(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L374
+.L378:
+	movl	$16, %eax
+	subl	-24(%rbp), %eax
+	movl	-16(%rbp), %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %ecx
+	movq	-40(%rbp), %rax
+	movl	-24(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$384, %rdx
+	movl	%ecx, 4(%rax,%rdx,4)
+	sall	-16(%rbp)
+	addl	$1, -24(%rbp)
+.L377:
+	cmpl	$16, -24(%rbp)
+	jle	.L381
+	movq	-40(%rbp), %rax
+	movl	-24(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$384, %rdx
+	movl	$-1, 4(%rax,%rdx,4)
+	movq	-40(%rbp), %rax
+	movl	$512, %edx
+	movl	$255, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	$0, -28(%rbp)
+	jmp	.L382
+.L386:
+	movq	-40(%rbp), %rdx
+	movl	-28(%rbp), %eax
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	cmpl	$9, -12(%rbp)
+	jg	.L383
+	movq	-40(%rbp), %rax
+	movl	-28(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$256, %rdx
+	movzwl	(%rax,%rdx,2), %eax
+	movzwl	%ax, %edx
+	movl	$9, %eax
+	subl	-12(%rbp), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -8(%rbp)
+	movl	$9, %eax
+	subl	-12(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	$0, -24(%rbp)
+	jmp	.L384
+.L385:
+	movl	-8(%rbp), %edx
+	movl	-24(%rbp), %eax
+	addl	%edx, %eax
+	movl	-28(%rbp), %edx
+	movl	%edx, %ecx
+	movq	-40(%rbp), %rdx
+	cltq
+	movb	%cl, (%rdx,%rax)
+	addl	$1, -24(%rbp)
+.L384:
+	movl	-24(%rbp), %eax
+	cmpl	-4(%rbp), %eax
+	jl	.L385
+.L383:
+	addl	$1, -28(%rbp)
+.L382:
+	movl	-28(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jl	.L386
+	movl	$1, %eax
+.L374:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4949:
+	.size	stbi__build_huffman, .-stbi__build_huffman
+	.type	stbi__build_fast_ac, @function
+stbi__build_fast_ac:
+.LFB4950:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movl	$0, -28(%rbp)
+	jmp	.L388
+.L391:
+	movq	-48(%rbp), %rdx
+	movl	-28(%rbp), %eax
+	cltq
+	movzbl	(%rdx,%rax), %eax
+	movb	%al, -29(%rbp)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movw	$0, (%rax)
+	cmpb	$-1, -29(%rbp)
+	je	.L389
+	movzbl	-29(%rbp), %eax
+	movq	-48(%rbp), %rdx
+	cltq
+	movzbl	1024(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -20(%rbp)
+	movl	-20(%rbp), %eax
+	sarl	$4, %eax
+	andl	$15, %eax
+	movl	%eax, -16(%rbp)
+	movl	-20(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -12(%rbp)
+	movzbl	-29(%rbp), %eax
+	movq	-48(%rbp), %rdx
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -8(%rbp)
+	cmpl	$0, -12(%rbp)
+	je	.L389
+	movl	-8(%rbp), %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	cmpl	$9, %eax
+	jg	.L389
+	movl	-8(%rbp), %eax
+	movl	-28(%rbp), %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	andl	$511, %eax
+	movl	%eax, %edx
+	movl	$9, %eax
+	subl	-12(%rbp), %eax
+	movl	%eax, %ecx
+	sarl	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -24(%rbp)
+	movl	-12(%rbp), %eax
+	subl	$1, %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-24(%rbp), %eax
+	cmpl	-4(%rbp), %eax
+	jge	.L390
+	movl	-12(%rbp), %eax
+	movl	$-1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	-24(%rbp), %eax
+	addl	%edx, %eax
+	addl	$1, %eax
+	movl	%eax, -24(%rbp)
+.L390:
+	cmpl	$-128, -24(%rbp)
+	jl	.L389
+	cmpl	$127, -24(%rbp)
+	jg	.L389
+	movl	-24(%rbp), %eax
+	sall	$4, %eax
+	movl	%eax, %edx
+	movl	-16(%rbp), %eax
+	addl	%edx, %eax
+	sall	$4, %eax
+	movl	%eax, %edx
+	movl	-8(%rbp), %eax
+	movl	%eax, %ecx
+	movl	-12(%rbp), %eax
+	addl	%ecx, %eax
+	leal	(%rdx,%rax), %ecx
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movw	%dx, (%rax)
+.L389:
+	addl	$1, -28(%rbp)
+.L388:
+	cmpl	$511, -28(%rbp)
+	jle	.L391
+	nop
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4950:
+	.size	stbi__build_fast_ac, .-stbi__build_fast_ac
+	.type	stbi__grow_buffer_unsafe, @function
+stbi__grow_buffer_unsafe:
+.LFB4951:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+.L399:
+	movq	-24(%rbp), %rax
+	movl	18476(%rax), %eax
+	testl	%eax, %eax
+	jne	.L393
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	jmp	.L394
+.L393:
+	movl	$0, %eax
+.L394:
+	movl	%eax, -4(%rbp)
+	cmpl	$255, -4(%rbp)
+	jne	.L395
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -8(%rbp)
+	jmp	.L396
+.L397:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -8(%rbp)
+.L396:
+	cmpl	$255, -8(%rbp)
+	je	.L397
+	cmpl	$0, -8(%rbp)
+	je	.L395
+	movl	-8(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, 18472(%rax)
+	movq	-24(%rbp), %rax
+	movl	$1, 18476(%rax)
+	jmp	.L392
+.L395:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %edx
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	movl	$24, %ecx
+	subl	%eax, %ecx
+	movl	-4(%rbp), %eax
+	sall	%cl, %eax
+	orl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	leal	8(%rax), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$24, %eax
+	jle	.L399
+.L392:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4951:
+	.size	stbi__grow_buffer_unsafe, .-stbi__grow_buffer_unsafe
+	.section	.rodata
+	.align 32
+	.type	stbi__bmask, @object
+	.size	stbi__bmask, 68
+stbi__bmask:
+	.long	0
+	.long	1
+	.long	3
+	.long	7
+	.long	15
+	.long	31
+	.long	63
+	.long	127
+	.long	255
+	.long	511
+	.long	1023
+	.long	2047
+	.long	4095
+	.long	8191
+	.long	16383
+	.long	32767
+	.long	65535
+	.align 8
+.LC11:
+	.string	"(((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]"
+	.text
+	.type	stbi__jpeg_huff_decode, @function
+stbi__jpeg_huff_decode:
+.LFB4952:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L401
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L401:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %eax
+	shrl	$23, %eax
+	movl	%eax, -12(%rbp)
+	movq	-32(%rbp), %rdx
+	movl	-12(%rbp), %eax
+	cltq
+	movzbl	(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -16(%rbp)
+	cmpl	$254, -16(%rbp)
+	jg	.L402
+	movq	-32(%rbp), %rdx
+	movl	-16(%rbp), %eax
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -4(%rbp)
+	jle	.L403
+	movl	$-1, %eax
+	jmp	.L404
+.L403:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	-4(%rbp), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	subl	-4(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movq	-32(%rbp), %rdx
+	movl	-16(%rbp), %eax
+	cltq
+	movzbl	1024(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	jmp	.L404
+.L402:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %eax
+	shrl	$16, %eax
+	movl	%eax, -8(%rbp)
+	movl	$10, -16(%rbp)
+.L407:
+	movq	-32(%rbp), %rax
+	movl	-16(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$384, %rdx
+	movl	4(%rax,%rdx,4), %eax
+	cmpl	%eax, -8(%rbp)
+	jb	.L414
+	addl	$1, -16(%rbp)
+	jmp	.L407
+.L414:
+	nop
+	cmpl	$17, -16(%rbp)
+	jne	.L408
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	leal	-16(%rax), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movl	$-1, %eax
+	jmp	.L404
+.L408:
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -16(%rbp)
+	jle	.L409
+	movl	$-1, %eax
+	jmp	.L404
+.L409:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	$32, %eax
+	subl	-16(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, %edx
+	movl	%edx, %ecx
+	movl	-16(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__bmask(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	andl	%eax, %ecx
+	movq	-32(%rbp), %rax
+	movl	-16(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$400, %rdx
+	movl	12(%rax,%rdx,4), %eax
+	addl	%ecx, %eax
+	movl	%eax, -12(%rbp)
+	cmpl	$0, -12(%rbp)
+	js	.L410
+	cmpl	$255, -12(%rbp)
+	jle	.L411
+.L410:
+	movl	$-1, %eax
+	jmp	.L404
+.L411:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %esi
+	movq	-32(%rbp), %rdx
+	movl	-12(%rbp), %eax
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	movl	$32, %edx
+	subl	%eax, %edx
+	movl	%edx, %ecx
+	shrl	%cl, %esi
+	movl	%esi, %ecx
+	movq	-32(%rbp), %rdx
+	movl	-12(%rbp), %eax
+	cltq
+	movzbl	1280(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__bmask(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	andl	%eax, %ecx
+	movq	-32(%rbp), %rax
+	movl	-12(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$256, %rdx
+	movzwl	(%rax,%rdx,2), %eax
+	movzwl	%ax, %eax
+	cmpl	%eax, %ecx
+	je	.L412
+	leaq	__PRETTY_FUNCTION__.16(%rip), %rax
+	movq	%rax, %rcx
+	movl	$2140, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC11(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L412:
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	subl	-16(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	-16(%rbp), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movq	-32(%rbp), %rdx
+	movl	-12(%rbp), %eax
+	cltq
+	movzbl	1024(%rdx,%rax), %eax
+	movzbl	%al, %eax
+.L404:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4952:
+	.size	stbi__jpeg_huff_decode, .-stbi__jpeg_huff_decode
+	.section	.rodata
+	.align 32
+	.type	stbi__jbias, @object
+	.size	stbi__jbias, 64
+stbi__jbias:
+	.long	0
+	.long	-1
+	.long	-3
+	.long	-7
+	.long	-15
+	.long	-31
+	.long	-63
+	.long	-127
+	.long	-255
+	.long	-511
+	.long	-1023
+	.long	-2047
+	.long	-4095
+	.long	-8191
+	.long	-16383
+	.long	-32767
+	.text
+	.type	stbi__extend_receive, @function
+stbi__extend_receive:
+.LFB4953:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -28(%rbp)
+	jle	.L416
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L416:
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -28(%rbp)
+	jle	.L417
+	movl	$0, %eax
+	jmp	.L418
+.L417:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %eax
+	shrl	$31, %eax
+	movl	%eax, -8(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	-28(%rbp), %eax
+	movl	%eax, %ecx
+	roll	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__bmask(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	notl	%eax
+	andl	-4(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__bmask(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	andl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	subl	-28(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__jbias(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	movl	-8(%rbp), %edx
+	subl	$1, %edx
+	andl	%edx, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+.L418:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4953:
+	.size	stbi__extend_receive, .-stbi__extend_receive
+	.type	stbi__jpeg_get_bits, @function
+stbi__jpeg_get_bits:
+.LFB4954:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -28(%rbp)
+	jle	.L420
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L420:
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -28(%rbp)
+	jle	.L421
+	movl	$0, %eax
+	jmp	.L422
+.L421:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	-28(%rbp), %eax
+	movl	%eax, %ecx
+	roll	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__bmask(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	notl	%eax
+	andl	-4(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__bmask(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	andl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	subl	-28(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movl	-4(%rbp), %eax
+.L422:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4954:
+	.size	stbi__jpeg_get_bits, .-stbi__jpeg_get_bits
+	.type	stbi__jpeg_get_bit, @function
+stbi__jpeg_get_bit:
+.LFB4955:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	testl	%eax, %eax
+	jg	.L424
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L424:
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	testl	%eax, %eax
+	jg	.L425
+	movl	$0, %eax
+	jmp	.L426
+.L425:
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18464(%rax), %eax
+	leal	(%rax,%rax), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movl	-4(%rbp), %eax
+	andl	$-2147483648, %eax
+.L426:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4955:
+	.size	stbi__jpeg_get_bit, .-stbi__jpeg_get_bit
+	.section	.rodata
+	.align 32
+	.type	stbi__jpeg_dezigzag, @object
+	.size	stbi__jpeg_dezigzag, 79
+stbi__jpeg_dezigzag:
+	.string	""
+	.ascii	"\001\b\020\t\002\003\n\021\030 \031\022\013\004\005\f\023\032"
+	.ascii	"!(0)\"\033\024\r\006\007\016\025\034#*1892+$\035\026\017\027"
+	.ascii	"\036%,3:;4-&\037'.5<=6/7>????????????????"
+.LC12:
+	.string	"bad huffman code"
+.LC13:
+	.string	"bad delta"
+.LC14:
+	.string	"can't merge dc and ac"
+	.text
+	.type	stbi__jpeg_decode_block, @function
+stbi__jpeg_decode_block:
+.LFB4956:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%rcx, -80(%rbp)
+	movq	%r8, -88(%rbp)
+	movl	%r9d, -92(%rbp)
+	movq	-56(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L428
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L428:
+	movq	-72(%rbp), %rdx
+	movq	-56(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__jpeg_huff_decode
+	movl	%eax, -32(%rbp)
+	cmpl	$0, -32(%rbp)
+	js	.L429
+	cmpl	$15, -32(%rbp)
+	jle	.L430
+.L429:
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L431
+.L430:
+	movq	-64(%rbp), %rax
+	movl	$128, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	cmpl	$0, -32(%rbp)
+	je	.L432
+	movl	-32(%rbp), %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__extend_receive
+	jmp	.L433
+.L432:
+	movl	$0, %eax
+.L433:
+	movl	%eax, -28(%rbp)
+	movq	-56(%rbp), %rcx
+	movl	-92(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18104, %rax
+	movl	(%rax), %eax
+	movl	-28(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__addints_valid
+	testl	%eax, %eax
+	jne	.L434
+	leaq	.LC13(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L431
+.L434:
+	movq	-56(%rbp), %rcx
+	movl	-92(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18104, %rax
+	movl	(%rax), %edx
+	movl	-28(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -24(%rbp)
+	movq	-56(%rbp), %rcx
+	movl	-92(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	leaq	18104(%rax), %rdx
+	movl	-24(%rbp), %eax
+	movl	%eax, (%rdx)
+	movq	16(%rbp), %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %edx
+	movl	-24(%rbp), %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2shorts_valid
+	testl	%eax, %eax
+	jne	.L435
+	leaq	.LC14(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L431
+.L435:
+	movl	-24(%rbp), %eax
+	movl	%eax, %edx
+	movq	16(%rbp), %rax
+	movzwl	(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-64(%rbp), %rax
+	movw	%dx, (%rax)
+	movl	$1, -36(%rbp)
+.L444:
+	movq	-56(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L436
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L436:
+	movq	-56(%rbp), %rax
+	movl	18464(%rax), %eax
+	shrl	$23, %eax
+	movl	%eax, -20(%rbp)
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-88(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -16(%rbp)
+	cmpl	$0, -16(%rbp)
+	je	.L437
+	movl	-16(%rbp), %eax
+	sarl	$4, %eax
+	andl	$15, %eax
+	addl	%eax, -36(%rbp)
+	movl	-16(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -8(%rbp)
+	movq	-56(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -8(%rbp)
+	jle	.L438
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L431
+.L438:
+	movq	-56(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	-8(%rbp), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movq	-56(%rbp), %rax
+	movl	18468(%rax), %eax
+	subl	-8(%rbp), %eax
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movl	-36(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -36(%rbp)
+	cltq
+	leaq	stbi__jpeg_dezigzag(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movl	-16(%rbp), %eax
+	sarl	$8, %eax
+	movl	%eax, %ecx
+	movl	-4(%rbp), %eax
+	leaq	(%rax,%rax), %rdx
+	movq	16(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	imull	%eax, %ecx
+	movl	-4(%rbp), %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movw	%dx, (%rax)
+	jmp	.L439
+.L437:
+	movq	-80(%rbp), %rdx
+	movq	-56(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__jpeg_huff_decode
+	movl	%eax, -12(%rbp)
+	cmpl	$0, -12(%rbp)
+	jns	.L440
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L431
+.L440:
+	movl	-12(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -8(%rbp)
+	movl	-12(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, -16(%rbp)
+	cmpl	$0, -8(%rbp)
+	jne	.L441
+	cmpl	$240, -12(%rbp)
+	jne	.L445
+	addl	$16, -36(%rbp)
+	jmp	.L439
+.L441:
+	movl	-16(%rbp), %eax
+	addl	%eax, -36(%rbp)
+	movl	-36(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -36(%rbp)
+	cltq
+	leaq	stbi__jpeg_dezigzag(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movl	-8(%rbp), %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__extend_receive
+	movl	%eax, %ecx
+	movl	-4(%rbp), %eax
+	leaq	(%rax,%rax), %rdx
+	movq	16(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	imull	%eax, %ecx
+	movl	-4(%rbp), %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movw	%dx, (%rax)
+.L439:
+	cmpl	$63, -36(%rbp)
+	jle	.L444
+	jmp	.L443
+.L445:
+	nop
+.L443:
+	movl	$1, %eax
+.L431:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4956:
+	.size	stbi__jpeg_decode_block, .-stbi__jpeg_decode_block
+	.type	stbi__jpeg_decode_block_prog_dc, @function
+stbi__jpeg_decode_block_prog_dc:
+.LFB4957:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	%ecx, -44(%rbp)
+	movq	-24(%rbp), %rax
+	movl	18488(%rax), %eax
+	testl	%eax, %eax
+	je	.L447
+	leaq	.LC14(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L448
+.L447:
+	movq	-24(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L449
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L449:
+	movq	-24(%rbp), %rax
+	movl	18492(%rax), %eax
+	testl	%eax, %eax
+	jne	.L450
+	movq	-32(%rbp), %rax
+	movl	$128, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-40(%rbp), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__jpeg_huff_decode
+	movl	%eax, -12(%rbp)
+	cmpl	$0, -12(%rbp)
+	js	.L451
+	cmpl	$15, -12(%rbp)
+	jle	.L452
+.L451:
+	leaq	.LC14(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L448
+.L452:
+	cmpl	$0, -12(%rbp)
+	je	.L453
+	movl	-12(%rbp), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__extend_receive
+	jmp	.L454
+.L453:
+	movl	$0, %eax
+.L454:
+	movl	%eax, -8(%rbp)
+	movq	-24(%rbp), %rcx
+	movl	-44(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18104, %rax
+	movl	(%rax), %eax
+	movl	-8(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__addints_valid
+	testl	%eax, %eax
+	jne	.L455
+	leaq	.LC13(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L448
+.L455:
+	movq	-24(%rbp), %rcx
+	movl	-44(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18104, %rax
+	movl	(%rax), %edx
+	movl	-8(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rcx
+	movl	-44(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	leaq	18104(%rax), %rdx
+	movl	-4(%rbp), %eax
+	movl	%eax, (%rdx)
+	movq	-24(%rbp), %rax
+	movl	18496(%rax), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	-4(%rbp), %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__mul2shorts_valid
+	testl	%eax, %eax
+	jne	.L456
+	leaq	.LC14(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L448
+.L456:
+	movl	-4(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	18496(%rax), %eax
+	movl	$1, %esi
+	movl	%eax, %ecx
+	sall	%cl, %esi
+	movl	%esi, %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movw	%dx, (%rax)
+	jmp	.L457
+.L450:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_get_bit
+	testl	%eax, %eax
+	je	.L457
+	movq	-32(%rbp), %rax
+	movzwl	(%rax), %eax
+	movl	%eax, %esi
+	movq	-24(%rbp), %rax
+	movl	18496(%rax), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	addl	%esi, %eax
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movw	%dx, (%rax)
+.L457:
+	movl	$1, %eax
+.L448:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4957:
+	.size	stbi__jpeg_decode_block_prog_dc, .-stbi__jpeg_decode_block_prog_dc
+	.type	stbi__jpeg_decode_block_prog_ac, @function
+stbi__jpeg_decode_block_prog_ac:
+.LFB4958:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -72(%rbp)
+	movq	%rsi, -80(%rbp)
+	movq	%rdx, -88(%rbp)
+	movq	%rcx, -96(%rbp)
+	movq	-72(%rbp), %rax
+	movl	18484(%rax), %eax
+	testl	%eax, %eax
+	jne	.L459
+	leaq	.LC14(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L460
+.L459:
+	movq	-72(%rbp), %rax
+	movl	18492(%rax), %eax
+	testl	%eax, %eax
+	jne	.L461
+	movq	-72(%rbp), %rax
+	movl	18496(%rax), %eax
+	movl	%eax, -40(%rbp)
+	movq	-72(%rbp), %rax
+	movl	18500(%rax), %eax
+	testl	%eax, %eax
+	je	.L462
+	movq	-72(%rbp), %rax
+	movl	18500(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+	movl	$1, %eax
+	jmp	.L460
+.L462:
+	movq	-72(%rbp), %rax
+	movl	18484(%rax), %eax
+	movl	%eax, -56(%rbp)
+.L472:
+	movq	-72(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L463
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L463:
+	movq	-72(%rbp), %rax
+	movl	18464(%rax), %eax
+	shrl	$23, %eax
+	movl	%eax, -36(%rbp)
+	movl	-36(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-96(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -32(%rbp)
+	cmpl	$0, -32(%rbp)
+	je	.L464
+	movl	-32(%rbp), %eax
+	sarl	$4, %eax
+	andl	$15, %eax
+	addl	%eax, -56(%rbp)
+	movl	-32(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -24(%rbp)
+	movq	-72(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	%eax, -24(%rbp)
+	jle	.L465
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L460
+.L465:
+	movq	-72(%rbp), %rax
+	movl	18464(%rax), %edx
+	movl	-24(%rbp), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18464(%rax)
+	movq	-72(%rbp), %rax
+	movl	18468(%rax), %eax
+	subl	-24(%rbp), %eax
+	movl	%eax, %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18468(%rax)
+	movl	-56(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -56(%rbp)
+	cltq
+	leaq	stbi__jpeg_dezigzag(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	movl	%eax, -20(%rbp)
+	movl	-32(%rbp), %eax
+	sarl	$8, %eax
+	movl	%eax, %edx
+	movl	-40(%rbp), %eax
+	movl	$1, %esi
+	movl	%eax, %ecx
+	sall	%cl, %esi
+	movl	%esi, %eax
+	movl	%edx, %ecx
+	imull	%eax, %ecx
+	movl	-20(%rbp), %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movw	%dx, (%rax)
+	jmp	.L466
+.L464:
+	movq	-88(%rbp), %rdx
+	movq	-72(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__jpeg_huff_decode
+	movl	%eax, -28(%rbp)
+	cmpl	$0, -28(%rbp)
+	jns	.L467
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L460
+.L467:
+	movl	-28(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -24(%rbp)
+	movl	-28(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, -32(%rbp)
+	cmpl	$0, -24(%rbp)
+	jne	.L468
+	cmpl	$14, -32(%rbp)
+	jg	.L469
+	movl	-32(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+	cmpl	$0, -32(%rbp)
+	je	.L470
+	movl	-32(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__jpeg_get_bits
+	movq	-72(%rbp), %rdx
+	movl	18500(%rdx), %edx
+	addl	%eax, %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+.L470:
+	movq	-72(%rbp), %rax
+	movl	18500(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+	jmp	.L473
+.L469:
+	addl	$16, -56(%rbp)
+	jmp	.L466
+.L468:
+	movl	-32(%rbp), %eax
+	addl	%eax, -56(%rbp)
+	movl	-56(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -56(%rbp)
+	cltq
+	leaq	stbi__jpeg_dezigzag(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	movl	%eax, -20(%rbp)
+	movl	-24(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__extend_receive
+	movl	%eax, %edx
+	movl	-40(%rbp), %eax
+	movl	$1, %esi
+	movl	%eax, %ecx
+	sall	%cl, %esi
+	movl	%esi, %eax
+	movl	%edx, %ecx
+	imull	%eax, %ecx
+	movl	-20(%rbp), %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movw	%dx, (%rax)
+.L466:
+	movq	-72(%rbp), %rax
+	movl	18488(%rax), %eax
+	cmpl	%eax, -56(%rbp)
+	jle	.L472
+	jmp	.L473
+.L461:
+	movq	-72(%rbp), %rax
+	movl	18496(%rax), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	movw	%ax, -58(%rbp)
+	movq	-72(%rbp), %rax
+	movl	18500(%rax), %eax
+	testl	%eax, %eax
+	je	.L474
+	movq	-72(%rbp), %rax
+	movl	18500(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+	movq	-72(%rbp), %rax
+	movl	18484(%rax), %eax
+	movl	%eax, -56(%rbp)
+	jmp	.L475
+.L478:
+	movl	-56(%rbp), %eax
+	cltq
+	leaq	stbi__jpeg_dezigzag(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	je	.L476
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_get_bit
+	testl	%eax, %eax
+	je	.L476
+	movq	-8(%rbp), %rax
+	movzwl	(%rax), %eax
+	andw	-58(%rbp), %ax
+	testw	%ax, %ax
+	jne	.L476
+	movq	-8(%rbp), %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jle	.L477
+	movq	-8(%rbp), %rax
+	movzwl	(%rax), %eax
+	movl	%eax, %edx
+	movzwl	-58(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movw	%dx, (%rax)
+	jmp	.L476
+.L477:
+	movq	-8(%rbp), %rax
+	movzwl	(%rax), %eax
+	movl	%eax, %edx
+	movzwl	-58(%rbp), %eax
+	subl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movw	%dx, (%rax)
+.L476:
+	addl	$1, -56(%rbp)
+.L475:
+	movq	-72(%rbp), %rax
+	movl	18488(%rax), %eax
+	cmpl	%eax, -56(%rbp)
+	jle	.L478
+	jmp	.L473
+.L474:
+	movq	-72(%rbp), %rax
+	movl	18484(%rax), %eax
+	movl	%eax, -56(%rbp)
+.L491:
+	movq	-88(%rbp), %rdx
+	movq	-72(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__jpeg_huff_decode
+	movl	%eax, -44(%rbp)
+	cmpl	$0, -44(%rbp)
+	jns	.L479
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L460
+.L479:
+	movl	-44(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -48(%rbp)
+	movl	-44(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, -52(%rbp)
+	cmpl	$0, -48(%rbp)
+	jne	.L480
+	cmpl	$14, -52(%rbp)
+	jg	.L485
+	movl	-52(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	leal	-1(%rax), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+	cmpl	$0, -52(%rbp)
+	je	.L482
+	movl	-52(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__jpeg_get_bits
+	movq	-72(%rbp), %rdx
+	movl	18500(%rdx), %edx
+	addl	%eax, %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, 18500(%rax)
+.L482:
+	movl	$64, -52(%rbp)
+	jmp	.L485
+.L480:
+	cmpl	$1, -48(%rbp)
+	je	.L483
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L460
+.L483:
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_get_bit
+	testl	%eax, %eax
+	je	.L484
+	movswl	-58(%rbp), %eax
+	movl	%eax, -48(%rbp)
+	jmp	.L485
+.L484:
+	movswl	-58(%rbp), %eax
+	negl	%eax
+	movl	%eax, -48(%rbp)
+	jmp	.L485
+.L490:
+	movl	-56(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -56(%rbp)
+	cltq
+	leaq	stbi__jpeg_dezigzag(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	leaq	(%rax,%rax), %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -16(%rbp)
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	je	.L486
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_get_bit
+	testl	%eax, %eax
+	je	.L485
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %eax
+	andw	-58(%rbp), %ax
+	testw	%ax, %ax
+	jne	.L485
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jle	.L487
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %eax
+	movl	%eax, %edx
+	movzwl	-58(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	jmp	.L485
+.L487:
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %eax
+	movl	%eax, %edx
+	movzwl	-58(%rbp), %eax
+	subl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	jmp	.L485
+.L486:
+	cmpl	$0, -52(%rbp)
+	jne	.L488
+	movl	-48(%rbp), %eax
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movw	%dx, (%rax)
+	jmp	.L489
+.L488:
+	subl	$1, -52(%rbp)
+.L485:
+	movq	-72(%rbp), %rax
+	movl	18488(%rax), %eax
+	cmpl	%eax, -56(%rbp)
+	jle	.L490
+.L489:
+	movq	-72(%rbp), %rax
+	movl	18488(%rax), %eax
+	cmpl	%eax, -56(%rbp)
+	jle	.L491
+.L473:
+	movl	$1, %eax
+.L460:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4958:
+	.size	stbi__jpeg_decode_block_prog_ac, .-stbi__jpeg_decode_block_prog_ac
+	.type	stbi__clamp, @function
+stbi__clamp:
+.LFB4959:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L493
+	cmpl	$0, -4(%rbp)
+	jns	.L494
+	movl	$0, %eax
+	jmp	.L495
+.L494:
+	cmpl	$255, -4(%rbp)
+	jle	.L493
+	movl	$-1, %eax
+	jmp	.L495
+.L493:
+	movl	-4(%rbp), %eax
+.L495:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4959:
+	.size	stbi__clamp, .-stbi__clamp
+	.type	stbi__idct_block, @function
+stbi__idct_block:
+.LFB4960:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$456, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -440(%rbp)
+	movl	%esi, -444(%rbp)
+	movq	%rdx, -456(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	leaq	-288(%rbp), %rax
+	movq	%rax, -312(%rbp)
+	movq	-456(%rbp), %rax
+	movq	%rax, -296(%rbp)
+	movl	$0, -424(%rbp)
+	jmp	.L497
+.L500:
+	movq	-296(%rbp), %rax
+	addq	$16, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	addq	$32, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	addq	$48, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	addq	$64, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	addq	$80, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	addq	$96, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	addq	$112, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	jne	.L498
+	movq	-296(%rbp), %rax
+	movzwl	(%rax), %eax
+	cwtl
+	sall	$2, %eax
+	movl	%eax, -368(%rbp)
+	movq	-312(%rbp), %rax
+	addq	$224, %rax
+	movl	-368(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-312(%rbp), %rdx
+	addq	$192, %rdx
+	movl	(%rax), %eax
+	movl	%eax, (%rdx)
+	movq	-312(%rbp), %rax
+	addq	$160, %rax
+	movl	(%rdx), %edx
+	movl	%edx, (%rax)
+	movq	-312(%rbp), %rdx
+	subq	$-128, %rdx
+	movl	(%rax), %eax
+	movl	%eax, (%rdx)
+	movq	-312(%rbp), %rax
+	addq	$96, %rax
+	movl	(%rdx), %edx
+	movl	%edx, (%rax)
+	movq	-312(%rbp), %rdx
+	addq	$64, %rdx
+	movl	(%rax), %eax
+	movl	%eax, (%rdx)
+	movq	-312(%rbp), %rax
+	addq	$32, %rax
+	movl	(%rdx), %edx
+	movl	%edx, (%rax)
+	movl	(%rax), %edx
+	movq	-312(%rbp), %rax
+	movl	%edx, (%rax)
+	jmp	.L499
+.L498:
+	movq	-296(%rbp), %rax
+	addq	$32, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -364(%rbp)
+	movq	-296(%rbp), %rax
+	addq	$96, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -360(%rbp)
+	movl	-364(%rbp), %edx
+	movl	-360(%rbp), %eax
+	addl	%edx, %eax
+	imull	$2217, %eax, %eax
+	movl	%eax, -356(%rbp)
+	movl	-360(%rbp), %eax
+	imull	$-7567, %eax, %edx
+	movl	-356(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -352(%rbp)
+	movl	-364(%rbp), %eax
+	imull	$3135, %eax, %edx
+	movl	-356(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -348(%rbp)
+	movq	-296(%rbp), %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -364(%rbp)
+	movq	-296(%rbp), %rax
+	addq	$64, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -360(%rbp)
+	movl	-364(%rbp), %edx
+	movl	-360(%rbp), %eax
+	addl	%edx, %eax
+	sall	$12, %eax
+	movl	%eax, -344(%rbp)
+	movl	-364(%rbp), %eax
+	subl	-360(%rbp), %eax
+	sall	$12, %eax
+	movl	%eax, -340(%rbp)
+	movl	-344(%rbp), %edx
+	movl	-348(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -336(%rbp)
+	movl	-344(%rbp), %eax
+	subl	-348(%rbp), %eax
+	movl	%eax, -332(%rbp)
+	movl	-340(%rbp), %edx
+	movl	-352(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -328(%rbp)
+	movl	-340(%rbp), %eax
+	subl	-352(%rbp), %eax
+	movl	%eax, -324(%rbp)
+	movq	-296(%rbp), %rax
+	addq	$112, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -344(%rbp)
+	movq	-296(%rbp), %rax
+	addq	$80, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -340(%rbp)
+	movq	-296(%rbp), %rax
+	addq	$48, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -352(%rbp)
+	movq	-296(%rbp), %rax
+	addq	$16, %rax
+	movzwl	(%rax), %eax
+	cwtl
+	movl	%eax, -348(%rbp)
+	movl	-344(%rbp), %edx
+	movl	-352(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -360(%rbp)
+	movl	-340(%rbp), %edx
+	movl	-348(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -320(%rbp)
+	movl	-344(%rbp), %edx
+	movl	-348(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -356(%rbp)
+	movl	-340(%rbp), %edx
+	movl	-352(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -364(%rbp)
+	movl	-360(%rbp), %edx
+	movl	-320(%rbp), %eax
+	addl	%edx, %eax
+	imull	$4816, %eax, %eax
+	movl	%eax, -316(%rbp)
+	movl	-344(%rbp), %eax
+	imull	$1223, %eax, %eax
+	movl	%eax, -344(%rbp)
+	movl	-340(%rbp), %eax
+	imull	$8410, %eax, %eax
+	movl	%eax, -340(%rbp)
+	movl	-352(%rbp), %eax
+	imull	$12586, %eax, %eax
+	movl	%eax, -352(%rbp)
+	movl	-348(%rbp), %eax
+	imull	$6149, %eax, %eax
+	movl	%eax, -348(%rbp)
+	movl	-356(%rbp), %eax
+	imull	$-3685, %eax, %edx
+	movl	-316(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -356(%rbp)
+	movl	-364(%rbp), %eax
+	imull	$-10497, %eax, %edx
+	movl	-316(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -364(%rbp)
+	movl	-360(%rbp), %eax
+	imull	$-8034, %eax, %eax
+	movl	%eax, -360(%rbp)
+	movl	-320(%rbp), %eax
+	imull	$-1597, %eax, %eax
+	movl	%eax, -320(%rbp)
+	movl	-356(%rbp), %edx
+	movl	-320(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -348(%rbp)
+	movl	-364(%rbp), %edx
+	movl	-360(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -352(%rbp)
+	movl	-364(%rbp), %edx
+	movl	-320(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -340(%rbp)
+	movl	-356(%rbp), %edx
+	movl	-360(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -344(%rbp)
+	addl	$512, -336(%rbp)
+	addl	$512, -328(%rbp)
+	addl	$512, -324(%rbp)
+	addl	$512, -332(%rbp)
+	movl	-336(%rbp), %edx
+	movl	-348(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$10, %eax
+	movl	%eax, %edx
+	movq	-312(%rbp), %rax
+	movl	%edx, (%rax)
+	movl	-336(%rbp), %eax
+	subl	-348(%rbp), %eax
+	movl	%eax, %edx
+	movq	-312(%rbp), %rax
+	addq	$224, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+	movl	-328(%rbp), %edx
+	movl	-352(%rbp), %eax
+	addl	%eax, %edx
+	movq	-312(%rbp), %rax
+	addq	$32, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+	movl	-328(%rbp), %eax
+	subl	-352(%rbp), %eax
+	movl	%eax, %edx
+	movq	-312(%rbp), %rax
+	addq	$192, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+	movl	-324(%rbp), %edx
+	movl	-340(%rbp), %eax
+	addl	%eax, %edx
+	movq	-312(%rbp), %rax
+	addq	$64, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+	movl	-324(%rbp), %eax
+	subl	-340(%rbp), %eax
+	movl	%eax, %edx
+	movq	-312(%rbp), %rax
+	addq	$160, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+	movl	-332(%rbp), %edx
+	movl	-344(%rbp), %eax
+	addl	%eax, %edx
+	movq	-312(%rbp), %rax
+	addq	$96, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+	movl	-332(%rbp), %eax
+	subl	-344(%rbp), %eax
+	movl	%eax, %edx
+	movq	-312(%rbp), %rax
+	subq	$-128, %rax
+	sarl	$10, %edx
+	movl	%edx, (%rax)
+.L499:
+	addl	$1, -424(%rbp)
+	addq	$2, -296(%rbp)
+	addq	$4, -312(%rbp)
+.L497:
+	cmpl	$7, -424(%rbp)
+	jle	.L500
+	movl	$0, -424(%rbp)
+	leaq	-288(%rbp), %rax
+	movq	%rax, -312(%rbp)
+	movq	-440(%rbp), %rax
+	movq	%rax, -304(%rbp)
+	jmp	.L501
+.L502:
+	movq	-312(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	%eax, -420(%rbp)
+	movq	-312(%rbp), %rax
+	movl	24(%rax), %eax
+	movl	%eax, -416(%rbp)
+	movl	-420(%rbp), %edx
+	movl	-416(%rbp), %eax
+	addl	%edx, %eax
+	imull	$2217, %eax, %eax
+	movl	%eax, -412(%rbp)
+	movl	-416(%rbp), %eax
+	imull	$-7567, %eax, %edx
+	movl	-412(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -408(%rbp)
+	movl	-420(%rbp), %eax
+	imull	$3135, %eax, %edx
+	movl	-412(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -404(%rbp)
+	movq	-312(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, -420(%rbp)
+	movq	-312(%rbp), %rax
+	movl	16(%rax), %eax
+	movl	%eax, -416(%rbp)
+	movl	-420(%rbp), %edx
+	movl	-416(%rbp), %eax
+	addl	%edx, %eax
+	sall	$12, %eax
+	movl	%eax, -400(%rbp)
+	movl	-420(%rbp), %eax
+	subl	-416(%rbp), %eax
+	sall	$12, %eax
+	movl	%eax, -396(%rbp)
+	movl	-400(%rbp), %edx
+	movl	-404(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -392(%rbp)
+	movl	-400(%rbp), %eax
+	subl	-404(%rbp), %eax
+	movl	%eax, -388(%rbp)
+	movl	-396(%rbp), %edx
+	movl	-408(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -384(%rbp)
+	movl	-396(%rbp), %eax
+	subl	-408(%rbp), %eax
+	movl	%eax, -380(%rbp)
+	movq	-312(%rbp), %rax
+	movl	28(%rax), %eax
+	movl	%eax, -400(%rbp)
+	movq	-312(%rbp), %rax
+	movl	20(%rax), %eax
+	movl	%eax, -396(%rbp)
+	movq	-312(%rbp), %rax
+	movl	12(%rax), %eax
+	movl	%eax, -408(%rbp)
+	movq	-312(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, -404(%rbp)
+	movl	-400(%rbp), %edx
+	movl	-408(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -416(%rbp)
+	movl	-396(%rbp), %edx
+	movl	-404(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -376(%rbp)
+	movl	-400(%rbp), %edx
+	movl	-404(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -412(%rbp)
+	movl	-396(%rbp), %edx
+	movl	-408(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -420(%rbp)
+	movl	-416(%rbp), %edx
+	movl	-376(%rbp), %eax
+	addl	%edx, %eax
+	imull	$4816, %eax, %eax
+	movl	%eax, -372(%rbp)
+	movl	-400(%rbp), %eax
+	imull	$1223, %eax, %eax
+	movl	%eax, -400(%rbp)
+	movl	-396(%rbp), %eax
+	imull	$8410, %eax, %eax
+	movl	%eax, -396(%rbp)
+	movl	-408(%rbp), %eax
+	imull	$12586, %eax, %eax
+	movl	%eax, -408(%rbp)
+	movl	-404(%rbp), %eax
+	imull	$6149, %eax, %eax
+	movl	%eax, -404(%rbp)
+	movl	-412(%rbp), %eax
+	imull	$-3685, %eax, %edx
+	movl	-372(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -412(%rbp)
+	movl	-420(%rbp), %eax
+	imull	$-10497, %eax, %edx
+	movl	-372(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -420(%rbp)
+	movl	-416(%rbp), %eax
+	imull	$-8034, %eax, %eax
+	movl	%eax, -416(%rbp)
+	movl	-376(%rbp), %eax
+	imull	$-1597, %eax, %eax
+	movl	%eax, -376(%rbp)
+	movl	-412(%rbp), %edx
+	movl	-376(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -404(%rbp)
+	movl	-420(%rbp), %edx
+	movl	-416(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -408(%rbp)
+	movl	-420(%rbp), %edx
+	movl	-376(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -396(%rbp)
+	movl	-412(%rbp), %edx
+	movl	-416(%rbp), %eax
+	addl	%edx, %eax
+	addl	%eax, -400(%rbp)
+	addl	$16842752, -392(%rbp)
+	addl	$16842752, -384(%rbp)
+	addl	$16842752, -380(%rbp)
+	addl	$16842752, -388(%rbp)
+	movl	-392(%rbp), %edx
+	movl	-404(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$17, %eax
+	movl	%eax, %edi
+	call	stbi__clamp
+	movq	-304(%rbp), %rdx
+	movb	%al, (%rdx)
+	movl	-392(%rbp), %eax
+	subl	-404(%rbp), %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	7(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	movl	-384(%rbp), %edx
+	movl	-408(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	1(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	movl	-384(%rbp), %eax
+	subl	-408(%rbp), %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	6(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	movl	-380(%rbp), %edx
+	movl	-396(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	2(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	movl	-380(%rbp), %eax
+	subl	-396(%rbp), %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	5(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	movl	-388(%rbp), %edx
+	movl	-400(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	3(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	movl	-388(%rbp), %eax
+	subl	-400(%rbp), %eax
+	sarl	$17, %eax
+	movq	-304(%rbp), %rdx
+	leaq	4(%rdx), %rbx
+	movl	%eax, %edi
+	call	stbi__clamp
+	movb	%al, (%rbx)
+	addl	$1, -424(%rbp)
+	addq	$32, -312(%rbp)
+	movl	-444(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+.L501:
+	cmpl	$7, -424(%rbp)
+	jle	.L502
+	nop
+	movq	-24(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L503
+	call	__stack_chk_fail@PLT
+.L503:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4960:
+	.size	stbi__idct_block, .-stbi__idct_block
+	.type	stbi__idct_simd, @function
+stbi__idct_simd:
+.LFB4961:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$10368, %rsp
+	movq	%rdi, -10344(%rbp)
+	movl	%esi, -10348(%rbp)
+	movq	%rdx, -10360(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movw	$2217, -10104(%rbp)
+	movw	$-5350, -10102(%rbp)
+	movw	$2217, -10100(%rbp)
+	movw	$-5350, -10098(%rbp)
+	movw	$2217, -10096(%rbp)
+	movw	$-5350, -10094(%rbp)
+	movw	$2217, -10092(%rbp)
+	movw	$-5350, -10090(%rbp)
+	movswl	-10104(%rbp), %eax
+	movswl	-10102(%rbp), %edx
+	movswl	-10100(%rbp), %ecx
+	movswl	-10098(%rbp), %esi
+	movswl	-10096(%rbp), %edi
+	movswl	-10094(%rbp), %r8d
+	movswl	-10092(%rbp), %r9d
+	movswl	-10090(%rbp), %r10d
+	movw	%r10w, -10088(%rbp)
+	movw	%r9w, -10086(%rbp)
+	movw	%r8w, -10084(%rbp)
+	movw	%di, -10082(%rbp)
+	movw	%si, -10080(%rbp)
+	movw	%cx, -10078(%rbp)
+	movw	%dx, -10076(%rbp)
+	movw	%ax, -10074(%rbp)
+	movzwl	-10074(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10076(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10078(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10080(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10082(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10084(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10086(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10088(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9728(%rbp)
+	movw	$5352, -10136(%rbp)
+	movw	$2217, -10134(%rbp)
+	movw	$5352, -10132(%rbp)
+	movw	$2217, -10130(%rbp)
+	movw	$5352, -10128(%rbp)
+	movw	$2217, -10126(%rbp)
+	movw	$5352, -10124(%rbp)
+	movw	$2217, -10122(%rbp)
+	movswl	-10136(%rbp), %eax
+	movswl	-10134(%rbp), %edx
+	movswl	-10132(%rbp), %ecx
+	movswl	-10130(%rbp), %esi
+	movswl	-10128(%rbp), %edi
+	movswl	-10126(%rbp), %r8d
+	movswl	-10124(%rbp), %r9d
+	movswl	-10122(%rbp), %r10d
+	movw	%r10w, -10120(%rbp)
+	movw	%r9w, -10118(%rbp)
+	movw	%r8w, -10116(%rbp)
+	movw	%di, -10114(%rbp)
+	movw	%si, -10112(%rbp)
+	movw	%cx, -10110(%rbp)
+	movw	%dx, -10108(%rbp)
+	movw	%ax, -10106(%rbp)
+	movzwl	-10106(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10108(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10110(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10112(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10114(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10116(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10118(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10120(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9712(%rbp)
+	movw	$1131, -10168(%rbp)
+	movw	$4816, -10166(%rbp)
+	movw	$1131, -10164(%rbp)
+	movw	$4816, -10162(%rbp)
+	movw	$1131, -10160(%rbp)
+	movw	$4816, -10158(%rbp)
+	movw	$1131, -10156(%rbp)
+	movw	$4816, -10154(%rbp)
+	movswl	-10168(%rbp), %eax
+	movswl	-10166(%rbp), %edx
+	movswl	-10164(%rbp), %ecx
+	movswl	-10162(%rbp), %esi
+	movswl	-10160(%rbp), %edi
+	movswl	-10158(%rbp), %r8d
+	movswl	-10156(%rbp), %r9d
+	movswl	-10154(%rbp), %r10d
+	movw	%r10w, -10152(%rbp)
+	movw	%r9w, -10150(%rbp)
+	movw	%r8w, -10148(%rbp)
+	movw	%di, -10146(%rbp)
+	movw	%si, -10144(%rbp)
+	movw	%cx, -10142(%rbp)
+	movw	%dx, -10140(%rbp)
+	movw	%ax, -10138(%rbp)
+	movzwl	-10138(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10140(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10142(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10144(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10146(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10148(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10150(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10152(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9696(%rbp)
+	movw	$4816, -10200(%rbp)
+	movw	$-5681, -10198(%rbp)
+	movw	$4816, -10196(%rbp)
+	movw	$-5681, -10194(%rbp)
+	movw	$4816, -10192(%rbp)
+	movw	$-5681, -10190(%rbp)
+	movw	$4816, -10188(%rbp)
+	movw	$-5681, -10186(%rbp)
+	movswl	-10200(%rbp), %eax
+	movswl	-10198(%rbp), %edx
+	movswl	-10196(%rbp), %ecx
+	movswl	-10194(%rbp), %esi
+	movswl	-10192(%rbp), %edi
+	movswl	-10190(%rbp), %r8d
+	movswl	-10188(%rbp), %r9d
+	movswl	-10186(%rbp), %r10d
+	movw	%r10w, -10184(%rbp)
+	movw	%r9w, -10182(%rbp)
+	movw	%r8w, -10180(%rbp)
+	movw	%di, -10178(%rbp)
+	movw	%si, -10176(%rbp)
+	movw	%cx, -10174(%rbp)
+	movw	%dx, -10172(%rbp)
+	movw	%ax, -10170(%rbp)
+	movzwl	-10170(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10172(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10174(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10176(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10178(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10180(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10182(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10184(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9680(%rbp)
+	movw	$-6811, -10232(%rbp)
+	movw	$-8034, -10230(%rbp)
+	movw	$-6811, -10228(%rbp)
+	movw	$-8034, -10226(%rbp)
+	movw	$-6811, -10224(%rbp)
+	movw	$-8034, -10222(%rbp)
+	movw	$-6811, -10220(%rbp)
+	movw	$-8034, -10218(%rbp)
+	movswl	-10232(%rbp), %eax
+	movswl	-10230(%rbp), %edx
+	movswl	-10228(%rbp), %ecx
+	movswl	-10226(%rbp), %esi
+	movswl	-10224(%rbp), %edi
+	movswl	-10222(%rbp), %r8d
+	movswl	-10220(%rbp), %r9d
+	movswl	-10218(%rbp), %r10d
+	movw	%r10w, -10216(%rbp)
+	movw	%r9w, -10214(%rbp)
+	movw	%r8w, -10212(%rbp)
+	movw	%di, -10210(%rbp)
+	movw	%si, -10208(%rbp)
+	movw	%cx, -10206(%rbp)
+	movw	%dx, -10204(%rbp)
+	movw	%ax, -10202(%rbp)
+	movzwl	-10202(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10204(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10206(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10208(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10210(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10212(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10214(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10216(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9664(%rbp)
+	movw	$-8034, -10264(%rbp)
+	movw	$4552, -10262(%rbp)
+	movw	$-8034, -10260(%rbp)
+	movw	$4552, -10258(%rbp)
+	movw	$-8034, -10256(%rbp)
+	movw	$4552, -10254(%rbp)
+	movw	$-8034, -10252(%rbp)
+	movw	$4552, -10250(%rbp)
+	movswl	-10264(%rbp), %eax
+	movswl	-10262(%rbp), %edx
+	movswl	-10260(%rbp), %ecx
+	movswl	-10258(%rbp), %esi
+	movswl	-10256(%rbp), %edi
+	movswl	-10254(%rbp), %r8d
+	movswl	-10252(%rbp), %r9d
+	movswl	-10250(%rbp), %r10d
+	movw	%r10w, -10248(%rbp)
+	movw	%r9w, -10246(%rbp)
+	movw	%r8w, -10244(%rbp)
+	movw	%di, -10242(%rbp)
+	movw	%si, -10240(%rbp)
+	movw	%cx, -10238(%rbp)
+	movw	%dx, -10236(%rbp)
+	movw	%ax, -10234(%rbp)
+	movzwl	-10234(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10236(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10238(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10240(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10242(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10244(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10246(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10248(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9648(%rbp)
+	movw	$6813, -10296(%rbp)
+	movw	$-1597, -10294(%rbp)
+	movw	$6813, -10292(%rbp)
+	movw	$-1597, -10290(%rbp)
+	movw	$6813, -10288(%rbp)
+	movw	$-1597, -10286(%rbp)
+	movw	$6813, -10284(%rbp)
+	movw	$-1597, -10282(%rbp)
+	movswl	-10296(%rbp), %eax
+	movswl	-10294(%rbp), %edx
+	movswl	-10292(%rbp), %ecx
+	movswl	-10290(%rbp), %esi
+	movswl	-10288(%rbp), %edi
+	movswl	-10286(%rbp), %r8d
+	movswl	-10284(%rbp), %r9d
+	movswl	-10282(%rbp), %r10d
+	movw	%r10w, -10280(%rbp)
+	movw	%r9w, -10278(%rbp)
+	movw	%r8w, -10276(%rbp)
+	movw	%di, -10274(%rbp)
+	movw	%si, -10272(%rbp)
+	movw	%cx, -10270(%rbp)
+	movw	%dx, -10268(%rbp)
+	movw	%ax, -10266(%rbp)
+	movzwl	-10266(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10268(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10270(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10272(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10274(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10276(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10278(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10280(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9632(%rbp)
+	movw	$-1597, -10328(%rbp)
+	movw	$4552, -10326(%rbp)
+	movw	$-1597, -10324(%rbp)
+	movw	$4552, -10322(%rbp)
+	movw	$-1597, -10320(%rbp)
+	movw	$4552, -10318(%rbp)
+	movw	$-1597, -10316(%rbp)
+	movw	$4552, -10314(%rbp)
+	movswl	-10328(%rbp), %eax
+	movswl	-10326(%rbp), %edx
+	movswl	-10324(%rbp), %ecx
+	movswl	-10322(%rbp), %esi
+	movswl	-10320(%rbp), %edi
+	movswl	-10318(%rbp), %r8d
+	movswl	-10316(%rbp), %r9d
+	movswl	-10314(%rbp), %r10d
+	movw	%r10w, -10312(%rbp)
+	movw	%r9w, -10310(%rbp)
+	movw	%r8w, -10308(%rbp)
+	movw	%di, -10306(%rbp)
+	movw	%si, -10304(%rbp)
+	movw	%cx, -10302(%rbp)
+	movw	%dx, -10300(%rbp)
+	movw	%ax, -10298(%rbp)
+	movzwl	-10298(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10300(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-10302(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10304(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-10306(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10308(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-10310(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-10312(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -9616(%rbp)
+	movl	$512, -9892(%rbp)
+	movl	-9892(%rbp), %eax
+	movl	%eax, -9888(%rbp)
+	movl	-9892(%rbp), %eax
+	movl	%eax, -9884(%rbp)
+	movl	-9892(%rbp), %eax
+	movl	%eax, -9880(%rbp)
+	movl	-9892(%rbp), %eax
+	movl	%eax, -9876(%rbp)
+	movd	-9888(%rbp), %xmm0
+	movd	-9884(%rbp), %xmm1
+	movdqa	%xmm1, %xmm2
+	punpckldq	%xmm0, %xmm2
+	movd	-9880(%rbp), %xmm1
+	movd	-9876(%rbp), %xmm0
+	punpckldq	%xmm1, %xmm0
+	punpcklqdq	%xmm2, %xmm0
+	nop
+	movaps	%xmm0, -9600(%rbp)
+	movl	$16842752, -9912(%rbp)
+	movl	-9912(%rbp), %eax
+	movl	%eax, -9908(%rbp)
+	movl	-9912(%rbp), %eax
+	movl	%eax, -9904(%rbp)
+	movl	-9912(%rbp), %eax
+	movl	%eax, -9900(%rbp)
+	movl	-9912(%rbp), %eax
+	movl	%eax, -9896(%rbp)
+	movd	-9908(%rbp), %xmm0
+	movd	-9904(%rbp), %xmm1
+	movdqa	%xmm1, %xmm2
+	punpckldq	%xmm0, %xmm2
+	movd	-9900(%rbp), %xmm1
+	movd	-9896(%rbp), %xmm0
+	punpckldq	%xmm1, %xmm0
+	punpcklqdq	%xmm2, %xmm0
+	nop
+	movaps	%xmm0, -9584(%rbp)
+	movq	-10360(%rbp), %rax
+	movq	%rax, -9752(%rbp)
+	movq	-9752(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9568(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$16, %rax
+	movq	%rax, -9760(%rbp)
+	movq	-9760(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9552(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$32, %rax
+	movq	%rax, -9768(%rbp)
+	movq	-9768(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9536(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$48, %rax
+	movq	%rax, -9776(%rbp)
+	movq	-9776(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9520(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$64, %rax
+	movq	%rax, -9784(%rbp)
+	movq	-9784(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9504(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$80, %rax
+	movq	%rax, -9792(%rbp)
+	movq	-9792(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9488(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$96, %rax
+	movq	%rax, -9800(%rbp)
+	movq	-9800(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9472(%rbp)
+	movq	-10360(%rbp), %rax
+	addq	$112, %rax
+	movq	%rax, -9808(%rbp)
+	movq	-9808(%rbp), %rax
+	movdqa	(%rax), %xmm0
+	movaps	%xmm0, -9456(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -48(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -32(%rbp)
+	movdqa	-32(%rbp), %xmm1
+	movdqa	-48(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9440(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -80(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -64(%rbp)
+	movdqa	-64(%rbp), %xmm1
+	movdqa	-80(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9424(%rbp)
+	movdqa	-9440(%rbp), %xmm0
+	movaps	%xmm0, -112(%rbp)
+	movdqa	-9728(%rbp), %xmm0
+	movaps	%xmm0, -96(%rbp)
+	movdqa	-96(%rbp), %xmm0
+	movdqa	-112(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9408(%rbp)
+	movdqa	-9424(%rbp), %xmm0
+	movaps	%xmm0, -144(%rbp)
+	movdqa	-9728(%rbp), %xmm0
+	movaps	%xmm0, -128(%rbp)
+	movdqa	-128(%rbp), %xmm0
+	movdqa	-144(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9392(%rbp)
+	movdqa	-9440(%rbp), %xmm0
+	movaps	%xmm0, -176(%rbp)
+	movdqa	-9712(%rbp), %xmm0
+	movaps	%xmm0, -160(%rbp)
+	movdqa	-160(%rbp), %xmm0
+	movdqa	-176(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9376(%rbp)
+	movdqa	-9424(%rbp), %xmm0
+	movaps	%xmm0, -208(%rbp)
+	movdqa	-9712(%rbp), %xmm0
+	movaps	%xmm0, -192(%rbp)
+	movdqa	-192(%rbp), %xmm0
+	movdqa	-208(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9360(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -240(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -224(%rbp)
+	movdqa	-240(%rbp), %xmm1
+	movdqa	-224(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -9344(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -272(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -256(%rbp)
+	movdqa	-272(%rbp), %xmm0
+	movdqa	-256(%rbp), %xmm1
+	psubw	%xmm1, %xmm0
+	movaps	%xmm0, -9328(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -304(%rbp)
+	movdqa	-9344(%rbp), %xmm0
+	movaps	%xmm0, -288(%rbp)
+	movdqa	-288(%rbp), %xmm1
+	movdqa	-304(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -320(%rbp)
+	movl	$4, -9916(%rbp)
+	movdqa	-320(%rbp), %xmm1
+	movd	-9916(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -9312(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -352(%rbp)
+	movdqa	-9344(%rbp), %xmm0
+	movaps	%xmm0, -336(%rbp)
+	movdqa	-336(%rbp), %xmm1
+	movdqa	-352(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -368(%rbp)
+	movl	$4, -9920(%rbp)
+	movdqa	-368(%rbp), %xmm1
+	movd	-9920(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -9296(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -400(%rbp)
+	movdqa	-9328(%rbp), %xmm0
+	movaps	%xmm0, -384(%rbp)
+	movdqa	-384(%rbp), %xmm1
+	movdqa	-400(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -416(%rbp)
+	movl	$4, -9924(%rbp)
+	movdqa	-416(%rbp), %xmm1
+	movd	-9924(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -9280(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -448(%rbp)
+	movdqa	-9328(%rbp), %xmm0
+	movaps	%xmm0, -432(%rbp)
+	movdqa	-432(%rbp), %xmm1
+	movdqa	-448(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -464(%rbp)
+	movl	$4, -9928(%rbp)
+	movdqa	-464(%rbp), %xmm1
+	movd	-9928(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -9264(%rbp)
+	movdqa	-9312(%rbp), %xmm0
+	movaps	%xmm0, -496(%rbp)
+	movdqa	-9376(%rbp), %xmm0
+	movaps	%xmm0, -480(%rbp)
+	movdqa	-496(%rbp), %xmm1
+	movdqa	-480(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -9248(%rbp)
+	movdqa	-9296(%rbp), %xmm0
+	movaps	%xmm0, -528(%rbp)
+	movdqa	-9360(%rbp), %xmm0
+	movaps	%xmm0, -512(%rbp)
+	movdqa	-528(%rbp), %xmm1
+	movdqa	-512(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -9232(%rbp)
+	movdqa	-9312(%rbp), %xmm0
+	movaps	%xmm0, -560(%rbp)
+	movdqa	-9376(%rbp), %xmm0
+	movaps	%xmm0, -544(%rbp)
+	movdqa	-560(%rbp), %xmm0
+	movdqa	-544(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -9216(%rbp)
+	movdqa	-9296(%rbp), %xmm0
+	movaps	%xmm0, -592(%rbp)
+	movdqa	-9360(%rbp), %xmm0
+	movaps	%xmm0, -576(%rbp)
+	movdqa	-592(%rbp), %xmm0
+	movdqa	-576(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -9200(%rbp)
+	movdqa	-9280(%rbp), %xmm0
+	movaps	%xmm0, -624(%rbp)
+	movdqa	-9408(%rbp), %xmm0
+	movaps	%xmm0, -608(%rbp)
+	movdqa	-624(%rbp), %xmm1
+	movdqa	-608(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -9184(%rbp)
+	movdqa	-9264(%rbp), %xmm0
+	movaps	%xmm0, -656(%rbp)
+	movdqa	-9392(%rbp), %xmm0
+	movaps	%xmm0, -640(%rbp)
+	movdqa	-656(%rbp), %xmm1
+	movdqa	-640(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -9168(%rbp)
+	movdqa	-9280(%rbp), %xmm0
+	movaps	%xmm0, -688(%rbp)
+	movdqa	-9408(%rbp), %xmm0
+	movaps	%xmm0, -672(%rbp)
+	movdqa	-688(%rbp), %xmm0
+	movdqa	-672(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -9152(%rbp)
+	movdqa	-9264(%rbp), %xmm0
+	movaps	%xmm0, -720(%rbp)
+	movdqa	-9392(%rbp), %xmm0
+	movaps	%xmm0, -704(%rbp)
+	movdqa	-720(%rbp), %xmm0
+	movdqa	-704(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -9136(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -752(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -736(%rbp)
+	movdqa	-736(%rbp), %xmm1
+	movdqa	-752(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9120(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -784(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -768(%rbp)
+	movdqa	-768(%rbp), %xmm1
+	movdqa	-784(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9104(%rbp)
+	movdqa	-9120(%rbp), %xmm0
+	movaps	%xmm0, -816(%rbp)
+	movdqa	-9664(%rbp), %xmm0
+	movaps	%xmm0, -800(%rbp)
+	movdqa	-800(%rbp), %xmm0
+	movdqa	-816(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9088(%rbp)
+	movdqa	-9104(%rbp), %xmm0
+	movaps	%xmm0, -848(%rbp)
+	movdqa	-9664(%rbp), %xmm0
+	movaps	%xmm0, -832(%rbp)
+	movdqa	-832(%rbp), %xmm0
+	movdqa	-848(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9072(%rbp)
+	movdqa	-9120(%rbp), %xmm0
+	movaps	%xmm0, -880(%rbp)
+	movdqa	-9648(%rbp), %xmm0
+	movaps	%xmm0, -864(%rbp)
+	movdqa	-864(%rbp), %xmm0
+	movdqa	-880(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9056(%rbp)
+	movdqa	-9104(%rbp), %xmm0
+	movaps	%xmm0, -912(%rbp)
+	movdqa	-9648(%rbp), %xmm0
+	movaps	%xmm0, -896(%rbp)
+	movdqa	-896(%rbp), %xmm0
+	movdqa	-912(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -9040(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -944(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -928(%rbp)
+	movdqa	-928(%rbp), %xmm1
+	movdqa	-944(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9024(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -976(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -960(%rbp)
+	movdqa	-960(%rbp), %xmm1
+	movdqa	-976(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9008(%rbp)
+	movdqa	-9024(%rbp), %xmm0
+	movaps	%xmm0, -1008(%rbp)
+	movdqa	-9632(%rbp), %xmm0
+	movaps	%xmm0, -992(%rbp)
+	movdqa	-992(%rbp), %xmm0
+	movdqa	-1008(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8992(%rbp)
+	movdqa	-9008(%rbp), %xmm0
+	movaps	%xmm0, -1040(%rbp)
+	movdqa	-9632(%rbp), %xmm0
+	movaps	%xmm0, -1024(%rbp)
+	movdqa	-1024(%rbp), %xmm0
+	movdqa	-1040(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8976(%rbp)
+	movdqa	-9024(%rbp), %xmm0
+	movaps	%xmm0, -1072(%rbp)
+	movdqa	-9616(%rbp), %xmm0
+	movaps	%xmm0, -1056(%rbp)
+	movdqa	-1056(%rbp), %xmm0
+	movdqa	-1072(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8960(%rbp)
+	movdqa	-9008(%rbp), %xmm0
+	movaps	%xmm0, -1104(%rbp)
+	movdqa	-9616(%rbp), %xmm0
+	movaps	%xmm0, -1088(%rbp)
+	movdqa	-1088(%rbp), %xmm0
+	movdqa	-1104(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8944(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -1136(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -1120(%rbp)
+	movdqa	-1136(%rbp), %xmm1
+	movdqa	-1120(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -8928(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -1168(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -1152(%rbp)
+	movdqa	-1168(%rbp), %xmm1
+	movdqa	-1152(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -8912(%rbp)
+	movdqa	-8928(%rbp), %xmm0
+	movaps	%xmm0, -1200(%rbp)
+	movdqa	-8912(%rbp), %xmm0
+	movaps	%xmm0, -1184(%rbp)
+	movdqa	-1184(%rbp), %xmm1
+	movdqa	-1200(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -8896(%rbp)
+	movdqa	-8928(%rbp), %xmm0
+	movaps	%xmm0, -1232(%rbp)
+	movdqa	-8912(%rbp), %xmm0
+	movaps	%xmm0, -1216(%rbp)
+	movdqa	-1216(%rbp), %xmm1
+	movdqa	-1232(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -8880(%rbp)
+	movdqa	-8896(%rbp), %xmm0
+	movaps	%xmm0, -1264(%rbp)
+	movdqa	-9696(%rbp), %xmm0
+	movaps	%xmm0, -1248(%rbp)
+	movdqa	-1248(%rbp), %xmm0
+	movdqa	-1264(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8864(%rbp)
+	movdqa	-8880(%rbp), %xmm0
+	movaps	%xmm0, -1296(%rbp)
+	movdqa	-9696(%rbp), %xmm0
+	movaps	%xmm0, -1280(%rbp)
+	movdqa	-1280(%rbp), %xmm0
+	movdqa	-1296(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8848(%rbp)
+	movdqa	-8896(%rbp), %xmm0
+	movaps	%xmm0, -1328(%rbp)
+	movdqa	-9680(%rbp), %xmm0
+	movaps	%xmm0, -1312(%rbp)
+	movdqa	-1312(%rbp), %xmm0
+	movdqa	-1328(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8832(%rbp)
+	movdqa	-8880(%rbp), %xmm0
+	movaps	%xmm0, -1360(%rbp)
+	movdqa	-9680(%rbp), %xmm0
+	movaps	%xmm0, -1344(%rbp)
+	movdqa	-1344(%rbp), %xmm0
+	movdqa	-1360(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8816(%rbp)
+	movdqa	-9088(%rbp), %xmm0
+	movaps	%xmm0, -1392(%rbp)
+	movdqa	-8864(%rbp), %xmm0
+	movaps	%xmm0, -1376(%rbp)
+	movdqa	-1392(%rbp), %xmm1
+	movdqa	-1376(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8800(%rbp)
+	movdqa	-9072(%rbp), %xmm0
+	movaps	%xmm0, -1424(%rbp)
+	movdqa	-8848(%rbp), %xmm0
+	movaps	%xmm0, -1408(%rbp)
+	movdqa	-1424(%rbp), %xmm1
+	movdqa	-1408(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8784(%rbp)
+	movdqa	-8992(%rbp), %xmm0
+	movaps	%xmm0, -1456(%rbp)
+	movdqa	-8832(%rbp), %xmm0
+	movaps	%xmm0, -1440(%rbp)
+	movdqa	-1456(%rbp), %xmm1
+	movdqa	-1440(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8768(%rbp)
+	movdqa	-8976(%rbp), %xmm0
+	movaps	%xmm0, -1488(%rbp)
+	movdqa	-8816(%rbp), %xmm0
+	movaps	%xmm0, -1472(%rbp)
+	movdqa	-1488(%rbp), %xmm1
+	movdqa	-1472(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8752(%rbp)
+	movdqa	-9056(%rbp), %xmm0
+	movaps	%xmm0, -1520(%rbp)
+	movdqa	-8832(%rbp), %xmm0
+	movaps	%xmm0, -1504(%rbp)
+	movdqa	-1520(%rbp), %xmm1
+	movdqa	-1504(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8736(%rbp)
+	movdqa	-9040(%rbp), %xmm0
+	movaps	%xmm0, -1552(%rbp)
+	movdqa	-8816(%rbp), %xmm0
+	movaps	%xmm0, -1536(%rbp)
+	movdqa	-1552(%rbp), %xmm1
+	movdqa	-1536(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8720(%rbp)
+	movdqa	-8960(%rbp), %xmm0
+	movaps	%xmm0, -1584(%rbp)
+	movdqa	-8864(%rbp), %xmm0
+	movaps	%xmm0, -1568(%rbp)
+	movdqa	-1584(%rbp), %xmm1
+	movdqa	-1568(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8704(%rbp)
+	movdqa	-8944(%rbp), %xmm0
+	movaps	%xmm0, -1616(%rbp)
+	movdqa	-8848(%rbp), %xmm0
+	movaps	%xmm0, -1600(%rbp)
+	movdqa	-1616(%rbp), %xmm1
+	movdqa	-1600(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8688(%rbp)
+	movdqa	-9248(%rbp), %xmm0
+	movaps	%xmm0, -1648(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -1632(%rbp)
+	movdqa	-1648(%rbp), %xmm1
+	movdqa	-1632(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8672(%rbp)
+	movdqa	-9232(%rbp), %xmm0
+	movaps	%xmm0, -1680(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -1664(%rbp)
+	movdqa	-1680(%rbp), %xmm1
+	movdqa	-1664(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8656(%rbp)
+	movdqa	-8672(%rbp), %xmm0
+	movaps	%xmm0, -1712(%rbp)
+	movdqa	-8704(%rbp), %xmm0
+	movaps	%xmm0, -1696(%rbp)
+	movdqa	-1712(%rbp), %xmm1
+	movdqa	-1696(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8640(%rbp)
+	movdqa	-8656(%rbp), %xmm0
+	movaps	%xmm0, -1744(%rbp)
+	movdqa	-8688(%rbp), %xmm0
+	movaps	%xmm0, -1728(%rbp)
+	movdqa	-1744(%rbp), %xmm1
+	movdqa	-1728(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8624(%rbp)
+	movdqa	-8672(%rbp), %xmm0
+	movaps	%xmm0, -1776(%rbp)
+	movdqa	-8704(%rbp), %xmm0
+	movaps	%xmm0, -1760(%rbp)
+	movdqa	-1776(%rbp), %xmm0
+	movdqa	-1760(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8608(%rbp)
+	movdqa	-8656(%rbp), %xmm0
+	movaps	%xmm0, -1808(%rbp)
+	movdqa	-8688(%rbp), %xmm0
+	movaps	%xmm0, -1792(%rbp)
+	movdqa	-1808(%rbp), %xmm0
+	movdqa	-1792(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8592(%rbp)
+	movdqa	-8624(%rbp), %xmm0
+	movaps	%xmm0, -1824(%rbp)
+	movl	$10, -9932(%rbp)
+	movdqa	-1824(%rbp), %xmm1
+	movd	-9932(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8640(%rbp), %xmm0
+	movaps	%xmm0, -1840(%rbp)
+	movl	$10, -9936(%rbp)
+	movdqa	-1840(%rbp), %xmm1
+	movd	-9936(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -1872(%rbp)
+	movaps	%xmm2, -1856(%rbp)
+	movdqa	-1856(%rbp), %xmm1
+	movdqa	-1872(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9568(%rbp)
+	movdqa	-8592(%rbp), %xmm0
+	movaps	%xmm0, -1888(%rbp)
+	movl	$10, -9940(%rbp)
+	movdqa	-1888(%rbp), %xmm1
+	movd	-9940(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8608(%rbp), %xmm0
+	movaps	%xmm0, -1904(%rbp)
+	movl	$10, -9944(%rbp)
+	movdqa	-1904(%rbp), %xmm1
+	movd	-9944(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -1936(%rbp)
+	movaps	%xmm2, -1920(%rbp)
+	movdqa	-1920(%rbp), %xmm1
+	movdqa	-1936(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9456(%rbp)
+	movdqa	-9184(%rbp), %xmm0
+	movaps	%xmm0, -1968(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -1952(%rbp)
+	movdqa	-1968(%rbp), %xmm1
+	movdqa	-1952(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8576(%rbp)
+	movdqa	-9168(%rbp), %xmm0
+	movaps	%xmm0, -2000(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -1984(%rbp)
+	movdqa	-2000(%rbp), %xmm1
+	movdqa	-1984(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8560(%rbp)
+	movdqa	-8576(%rbp), %xmm0
+	movaps	%xmm0, -2032(%rbp)
+	movdqa	-8736(%rbp), %xmm0
+	movaps	%xmm0, -2016(%rbp)
+	movdqa	-2032(%rbp), %xmm1
+	movdqa	-2016(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8544(%rbp)
+	movdqa	-8560(%rbp), %xmm0
+	movaps	%xmm0, -2064(%rbp)
+	movdqa	-8720(%rbp), %xmm0
+	movaps	%xmm0, -2048(%rbp)
+	movdqa	-2064(%rbp), %xmm1
+	movdqa	-2048(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8528(%rbp)
+	movdqa	-8576(%rbp), %xmm0
+	movaps	%xmm0, -2096(%rbp)
+	movdqa	-8736(%rbp), %xmm0
+	movaps	%xmm0, -2080(%rbp)
+	movdqa	-2096(%rbp), %xmm0
+	movdqa	-2080(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8512(%rbp)
+	movdqa	-8560(%rbp), %xmm0
+	movaps	%xmm0, -2128(%rbp)
+	movdqa	-8720(%rbp), %xmm0
+	movaps	%xmm0, -2112(%rbp)
+	movdqa	-2128(%rbp), %xmm0
+	movdqa	-2112(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8496(%rbp)
+	movdqa	-8528(%rbp), %xmm0
+	movaps	%xmm0, -2144(%rbp)
+	movl	$10, -9948(%rbp)
+	movdqa	-2144(%rbp), %xmm1
+	movd	-9948(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8544(%rbp), %xmm0
+	movaps	%xmm0, -2160(%rbp)
+	movl	$10, -9952(%rbp)
+	movdqa	-2160(%rbp), %xmm1
+	movd	-9952(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -2192(%rbp)
+	movaps	%xmm2, -2176(%rbp)
+	movdqa	-2176(%rbp), %xmm1
+	movdqa	-2192(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9552(%rbp)
+	movdqa	-8496(%rbp), %xmm0
+	movaps	%xmm0, -2208(%rbp)
+	movl	$10, -9956(%rbp)
+	movdqa	-2208(%rbp), %xmm1
+	movd	-9956(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8512(%rbp), %xmm0
+	movaps	%xmm0, -2224(%rbp)
+	movl	$10, -9960(%rbp)
+	movdqa	-2224(%rbp), %xmm1
+	movd	-9960(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -2256(%rbp)
+	movaps	%xmm2, -2240(%rbp)
+	movdqa	-2240(%rbp), %xmm1
+	movdqa	-2256(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9472(%rbp)
+	movdqa	-9152(%rbp), %xmm0
+	movaps	%xmm0, -2288(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -2272(%rbp)
+	movdqa	-2288(%rbp), %xmm1
+	movdqa	-2272(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8480(%rbp)
+	movdqa	-9136(%rbp), %xmm0
+	movaps	%xmm0, -2320(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -2304(%rbp)
+	movdqa	-2320(%rbp), %xmm1
+	movdqa	-2304(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8464(%rbp)
+	movdqa	-8480(%rbp), %xmm0
+	movaps	%xmm0, -2352(%rbp)
+	movdqa	-8768(%rbp), %xmm0
+	movaps	%xmm0, -2336(%rbp)
+	movdqa	-2352(%rbp), %xmm1
+	movdqa	-2336(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8448(%rbp)
+	movdqa	-8464(%rbp), %xmm0
+	movaps	%xmm0, -2384(%rbp)
+	movdqa	-8752(%rbp), %xmm0
+	movaps	%xmm0, -2368(%rbp)
+	movdqa	-2384(%rbp), %xmm1
+	movdqa	-2368(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8432(%rbp)
+	movdqa	-8480(%rbp), %xmm0
+	movaps	%xmm0, -2416(%rbp)
+	movdqa	-8768(%rbp), %xmm0
+	movaps	%xmm0, -2400(%rbp)
+	movdqa	-2416(%rbp), %xmm0
+	movdqa	-2400(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8416(%rbp)
+	movdqa	-8464(%rbp), %xmm0
+	movaps	%xmm0, -2448(%rbp)
+	movdqa	-8752(%rbp), %xmm0
+	movaps	%xmm0, -2432(%rbp)
+	movdqa	-2448(%rbp), %xmm0
+	movdqa	-2432(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8400(%rbp)
+	movdqa	-8432(%rbp), %xmm0
+	movaps	%xmm0, -2464(%rbp)
+	movl	$10, -9964(%rbp)
+	movdqa	-2464(%rbp), %xmm1
+	movd	-9964(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8448(%rbp), %xmm0
+	movaps	%xmm0, -2480(%rbp)
+	movl	$10, -9968(%rbp)
+	movdqa	-2480(%rbp), %xmm1
+	movd	-9968(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -2512(%rbp)
+	movaps	%xmm2, -2496(%rbp)
+	movdqa	-2496(%rbp), %xmm1
+	movdqa	-2512(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9536(%rbp)
+	movdqa	-8400(%rbp), %xmm0
+	movaps	%xmm0, -2528(%rbp)
+	movl	$10, -9972(%rbp)
+	movdqa	-2528(%rbp), %xmm1
+	movd	-9972(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8416(%rbp), %xmm0
+	movaps	%xmm0, -2544(%rbp)
+	movl	$10, -9976(%rbp)
+	movdqa	-2544(%rbp), %xmm1
+	movd	-9976(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -2576(%rbp)
+	movaps	%xmm2, -2560(%rbp)
+	movdqa	-2560(%rbp), %xmm1
+	movdqa	-2576(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9488(%rbp)
+	movdqa	-9216(%rbp), %xmm0
+	movaps	%xmm0, -2608(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -2592(%rbp)
+	movdqa	-2608(%rbp), %xmm1
+	movdqa	-2592(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8384(%rbp)
+	movdqa	-9200(%rbp), %xmm0
+	movaps	%xmm0, -2640(%rbp)
+	movdqa	-9600(%rbp), %xmm0
+	movaps	%xmm0, -2624(%rbp)
+	movdqa	-2640(%rbp), %xmm1
+	movdqa	-2624(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8368(%rbp)
+	movdqa	-8384(%rbp), %xmm0
+	movaps	%xmm0, -2672(%rbp)
+	movdqa	-8800(%rbp), %xmm0
+	movaps	%xmm0, -2656(%rbp)
+	movdqa	-2672(%rbp), %xmm1
+	movdqa	-2656(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8352(%rbp)
+	movdqa	-8368(%rbp), %xmm0
+	movaps	%xmm0, -2704(%rbp)
+	movdqa	-8784(%rbp), %xmm0
+	movaps	%xmm0, -2688(%rbp)
+	movdqa	-2704(%rbp), %xmm1
+	movdqa	-2688(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8336(%rbp)
+	movdqa	-8384(%rbp), %xmm0
+	movaps	%xmm0, -2736(%rbp)
+	movdqa	-8800(%rbp), %xmm0
+	movaps	%xmm0, -2720(%rbp)
+	movdqa	-2736(%rbp), %xmm0
+	movdqa	-2720(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8320(%rbp)
+	movdqa	-8368(%rbp), %xmm0
+	movaps	%xmm0, -2768(%rbp)
+	movdqa	-8784(%rbp), %xmm0
+	movaps	%xmm0, -2752(%rbp)
+	movdqa	-2768(%rbp), %xmm0
+	movdqa	-2752(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8304(%rbp)
+	movdqa	-8336(%rbp), %xmm0
+	movaps	%xmm0, -2784(%rbp)
+	movl	$10, -9980(%rbp)
+	movdqa	-2784(%rbp), %xmm1
+	movd	-9980(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8352(%rbp), %xmm0
+	movaps	%xmm0, -2800(%rbp)
+	movl	$10, -9984(%rbp)
+	movdqa	-2800(%rbp), %xmm1
+	movd	-9984(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -2832(%rbp)
+	movaps	%xmm2, -2816(%rbp)
+	movdqa	-2816(%rbp), %xmm1
+	movdqa	-2832(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9520(%rbp)
+	movdqa	-8304(%rbp), %xmm0
+	movaps	%xmm0, -2848(%rbp)
+	movl	$10, -9988(%rbp)
+	movdqa	-2848(%rbp), %xmm1
+	movd	-9988(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-8320(%rbp), %xmm0
+	movaps	%xmm0, -2864(%rbp)
+	movl	$10, -9992(%rbp)
+	movdqa	-2864(%rbp), %xmm1
+	movd	-9992(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -2896(%rbp)
+	movaps	%xmm2, -2880(%rbp)
+	movdqa	-2880(%rbp), %xmm1
+	movdqa	-2896(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9504(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -2928(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -2912(%rbp)
+	movdqa	-2912(%rbp), %xmm1
+	movdqa	-2928(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9568(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -2960(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -2944(%rbp)
+	movdqa	-2944(%rbp), %xmm1
+	movdqa	-2960(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9504(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -2992(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -2976(%rbp)
+	movdqa	-2976(%rbp), %xmm1
+	movdqa	-2992(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9552(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3024(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -3008(%rbp)
+	movdqa	-3008(%rbp), %xmm1
+	movdqa	-3024(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9488(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -3056(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3040(%rbp)
+	movdqa	-3040(%rbp), %xmm1
+	movdqa	-3056(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9536(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3088(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3072(%rbp)
+	movdqa	-3072(%rbp), %xmm1
+	movdqa	-3088(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9472(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -3120(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -3104(%rbp)
+	movdqa	-3104(%rbp), %xmm1
+	movdqa	-3120(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9520(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3152(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -3136(%rbp)
+	movdqa	-3136(%rbp), %xmm1
+	movdqa	-3152(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9456(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -3184(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -3168(%rbp)
+	movdqa	-3168(%rbp), %xmm1
+	movdqa	-3184(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9568(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3216(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -3200(%rbp)
+	movdqa	-3200(%rbp), %xmm1
+	movdqa	-3216(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9536(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -3248(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -3232(%rbp)
+	movdqa	-3232(%rbp), %xmm1
+	movdqa	-3248(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9552(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3280(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -3264(%rbp)
+	movdqa	-3264(%rbp), %xmm1
+	movdqa	-3280(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9520(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -3312(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3296(%rbp)
+	movdqa	-3296(%rbp), %xmm1
+	movdqa	-3312(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9504(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3344(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3328(%rbp)
+	movdqa	-3328(%rbp), %xmm1
+	movdqa	-3344(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9472(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -3376(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -3360(%rbp)
+	movdqa	-3360(%rbp), %xmm1
+	movdqa	-3376(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9488(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3408(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -3392(%rbp)
+	movdqa	-3392(%rbp), %xmm1
+	movdqa	-3408(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9456(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -3440(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -3424(%rbp)
+	movdqa	-3424(%rbp), %xmm1
+	movdqa	-3440(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9568(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3472(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -3456(%rbp)
+	movdqa	-3456(%rbp), %xmm1
+	movdqa	-3472(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9552(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -3504(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -3488(%rbp)
+	movdqa	-3488(%rbp), %xmm1
+	movdqa	-3504(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9536(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3536(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -3520(%rbp)
+	movdqa	-3520(%rbp), %xmm1
+	movdqa	-3536(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9520(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -3568(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -3552(%rbp)
+	movdqa	-3552(%rbp), %xmm1
+	movdqa	-3568(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9504(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3600(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -3584(%rbp)
+	movdqa	-3584(%rbp), %xmm1
+	movdqa	-3600(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9488(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3632(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -3616(%rbp)
+	movdqa	-3616(%rbp), %xmm1
+	movdqa	-3632(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -9472(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -3664(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -3648(%rbp)
+	movdqa	-3648(%rbp), %xmm1
+	movdqa	-3664(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -9456(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -3696(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3680(%rbp)
+	movdqa	-3680(%rbp), %xmm1
+	movdqa	-3696(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -8272(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -3728(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -3712(%rbp)
+	movdqa	-3712(%rbp), %xmm1
+	movdqa	-3728(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -8256(%rbp)
+	movdqa	-8272(%rbp), %xmm0
+	movaps	%xmm0, -3760(%rbp)
+	movdqa	-9728(%rbp), %xmm0
+	movaps	%xmm0, -3744(%rbp)
+	movdqa	-3744(%rbp), %xmm0
+	movdqa	-3760(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8240(%rbp)
+	movdqa	-8256(%rbp), %xmm0
+	movaps	%xmm0, -3792(%rbp)
+	movdqa	-9728(%rbp), %xmm0
+	movaps	%xmm0, -3776(%rbp)
+	movdqa	-3776(%rbp), %xmm0
+	movdqa	-3792(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8224(%rbp)
+	movdqa	-8272(%rbp), %xmm0
+	movaps	%xmm0, -3824(%rbp)
+	movdqa	-9712(%rbp), %xmm0
+	movaps	%xmm0, -3808(%rbp)
+	movdqa	-3808(%rbp), %xmm0
+	movdqa	-3824(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8208(%rbp)
+	movdqa	-8256(%rbp), %xmm0
+	movaps	%xmm0, -3856(%rbp)
+	movdqa	-9712(%rbp), %xmm0
+	movaps	%xmm0, -3840(%rbp)
+	movdqa	-3840(%rbp), %xmm0
+	movdqa	-3856(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -8192(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -3888(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -3872(%rbp)
+	movdqa	-3888(%rbp), %xmm1
+	movdqa	-3872(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -8176(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -3920(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -3904(%rbp)
+	movdqa	-3920(%rbp), %xmm0
+	movdqa	-3904(%rbp), %xmm1
+	psubw	%xmm1, %xmm0
+	movaps	%xmm0, -8160(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -3952(%rbp)
+	movdqa	-8176(%rbp), %xmm0
+	movaps	%xmm0, -3936(%rbp)
+	movdqa	-3936(%rbp), %xmm1
+	movdqa	-3952(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -3968(%rbp)
+	movl	$4, -9996(%rbp)
+	movdqa	-3968(%rbp), %xmm1
+	movd	-9996(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -8144(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -4000(%rbp)
+	movdqa	-8176(%rbp), %xmm0
+	movaps	%xmm0, -3984(%rbp)
+	movdqa	-3984(%rbp), %xmm1
+	movdqa	-4000(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -4016(%rbp)
+	movl	$4, -10000(%rbp)
+	movdqa	-4016(%rbp), %xmm1
+	movd	-10000(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -8128(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -4048(%rbp)
+	movdqa	-8160(%rbp), %xmm0
+	movaps	%xmm0, -4032(%rbp)
+	movdqa	-4032(%rbp), %xmm1
+	movdqa	-4048(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -4064(%rbp)
+	movl	$4, -10004(%rbp)
+	movdqa	-4064(%rbp), %xmm1
+	movd	-10004(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -8112(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -4096(%rbp)
+	movdqa	-8160(%rbp), %xmm0
+	movaps	%xmm0, -4080(%rbp)
+	movdqa	-4080(%rbp), %xmm1
+	movdqa	-4096(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -4112(%rbp)
+	movl	$4, -10008(%rbp)
+	movdqa	-4112(%rbp), %xmm1
+	movd	-10008(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -8096(%rbp)
+	movdqa	-8144(%rbp), %xmm0
+	movaps	%xmm0, -4144(%rbp)
+	movdqa	-8208(%rbp), %xmm0
+	movaps	%xmm0, -4128(%rbp)
+	movdqa	-4144(%rbp), %xmm1
+	movdqa	-4128(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8080(%rbp)
+	movdqa	-8128(%rbp), %xmm0
+	movaps	%xmm0, -4176(%rbp)
+	movdqa	-8192(%rbp), %xmm0
+	movaps	%xmm0, -4160(%rbp)
+	movdqa	-4176(%rbp), %xmm1
+	movdqa	-4160(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8064(%rbp)
+	movdqa	-8144(%rbp), %xmm0
+	movaps	%xmm0, -4208(%rbp)
+	movdqa	-8208(%rbp), %xmm0
+	movaps	%xmm0, -4192(%rbp)
+	movdqa	-4208(%rbp), %xmm0
+	movdqa	-4192(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8048(%rbp)
+	movdqa	-8128(%rbp), %xmm0
+	movaps	%xmm0, -4240(%rbp)
+	movdqa	-8192(%rbp), %xmm0
+	movaps	%xmm0, -4224(%rbp)
+	movdqa	-4240(%rbp), %xmm0
+	movdqa	-4224(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -8032(%rbp)
+	movdqa	-8112(%rbp), %xmm0
+	movaps	%xmm0, -4272(%rbp)
+	movdqa	-8240(%rbp), %xmm0
+	movaps	%xmm0, -4256(%rbp)
+	movdqa	-4272(%rbp), %xmm1
+	movdqa	-4256(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8016(%rbp)
+	movdqa	-8096(%rbp), %xmm0
+	movaps	%xmm0, -4304(%rbp)
+	movdqa	-8224(%rbp), %xmm0
+	movaps	%xmm0, -4288(%rbp)
+	movdqa	-4304(%rbp), %xmm1
+	movdqa	-4288(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -8000(%rbp)
+	movdqa	-8112(%rbp), %xmm0
+	movaps	%xmm0, -4336(%rbp)
+	movdqa	-8240(%rbp), %xmm0
+	movaps	%xmm0, -4320(%rbp)
+	movdqa	-4336(%rbp), %xmm0
+	movdqa	-4320(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7984(%rbp)
+	movdqa	-8096(%rbp), %xmm0
+	movaps	%xmm0, -4368(%rbp)
+	movdqa	-8224(%rbp), %xmm0
+	movaps	%xmm0, -4352(%rbp)
+	movdqa	-4368(%rbp), %xmm0
+	movdqa	-4352(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7968(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -4400(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -4384(%rbp)
+	movdqa	-4384(%rbp), %xmm1
+	movdqa	-4400(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -7952(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -4432(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -4416(%rbp)
+	movdqa	-4416(%rbp), %xmm1
+	movdqa	-4432(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -7936(%rbp)
+	movdqa	-7952(%rbp), %xmm0
+	movaps	%xmm0, -4464(%rbp)
+	movdqa	-9664(%rbp), %xmm0
+	movaps	%xmm0, -4448(%rbp)
+	movdqa	-4448(%rbp), %xmm0
+	movdqa	-4464(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7920(%rbp)
+	movdqa	-7936(%rbp), %xmm0
+	movaps	%xmm0, -4496(%rbp)
+	movdqa	-9664(%rbp), %xmm0
+	movaps	%xmm0, -4480(%rbp)
+	movdqa	-4480(%rbp), %xmm0
+	movdqa	-4496(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7904(%rbp)
+	movdqa	-7952(%rbp), %xmm0
+	movaps	%xmm0, -4528(%rbp)
+	movdqa	-9648(%rbp), %xmm0
+	movaps	%xmm0, -4512(%rbp)
+	movdqa	-4512(%rbp), %xmm0
+	movdqa	-4528(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7888(%rbp)
+	movdqa	-7936(%rbp), %xmm0
+	movaps	%xmm0, -4560(%rbp)
+	movdqa	-9648(%rbp), %xmm0
+	movaps	%xmm0, -4544(%rbp)
+	movdqa	-4544(%rbp), %xmm0
+	movdqa	-4560(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7872(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -4592(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -4576(%rbp)
+	movdqa	-4576(%rbp), %xmm1
+	movdqa	-4592(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -7856(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -4624(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -4608(%rbp)
+	movdqa	-4608(%rbp), %xmm1
+	movdqa	-4624(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -7840(%rbp)
+	movdqa	-7856(%rbp), %xmm0
+	movaps	%xmm0, -4656(%rbp)
+	movdqa	-9632(%rbp), %xmm0
+	movaps	%xmm0, -4640(%rbp)
+	movdqa	-4640(%rbp), %xmm0
+	movdqa	-4656(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7824(%rbp)
+	movdqa	-7840(%rbp), %xmm0
+	movaps	%xmm0, -4688(%rbp)
+	movdqa	-9632(%rbp), %xmm0
+	movaps	%xmm0, -4672(%rbp)
+	movdqa	-4672(%rbp), %xmm0
+	movdqa	-4688(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7808(%rbp)
+	movdqa	-7856(%rbp), %xmm0
+	movaps	%xmm0, -4720(%rbp)
+	movdqa	-9616(%rbp), %xmm0
+	movaps	%xmm0, -4704(%rbp)
+	movdqa	-4704(%rbp), %xmm0
+	movdqa	-4720(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7792(%rbp)
+	movdqa	-7840(%rbp), %xmm0
+	movaps	%xmm0, -4752(%rbp)
+	movdqa	-9616(%rbp), %xmm0
+	movaps	%xmm0, -4736(%rbp)
+	movdqa	-4736(%rbp), %xmm0
+	movdqa	-4752(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7776(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -4784(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -4768(%rbp)
+	movdqa	-4784(%rbp), %xmm1
+	movdqa	-4768(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -7760(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -4816(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -4800(%rbp)
+	movdqa	-4816(%rbp), %xmm1
+	movdqa	-4800(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -7744(%rbp)
+	movdqa	-7760(%rbp), %xmm0
+	movaps	%xmm0, -4848(%rbp)
+	movdqa	-7744(%rbp), %xmm0
+	movaps	%xmm0, -4832(%rbp)
+	movdqa	-4832(%rbp), %xmm1
+	movdqa	-4848(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -7728(%rbp)
+	movdqa	-7760(%rbp), %xmm0
+	movaps	%xmm0, -4880(%rbp)
+	movdqa	-7744(%rbp), %xmm0
+	movaps	%xmm0, -4864(%rbp)
+	movdqa	-4864(%rbp), %xmm1
+	movdqa	-4880(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -7712(%rbp)
+	movdqa	-7728(%rbp), %xmm0
+	movaps	%xmm0, -4912(%rbp)
+	movdqa	-9696(%rbp), %xmm0
+	movaps	%xmm0, -4896(%rbp)
+	movdqa	-4896(%rbp), %xmm0
+	movdqa	-4912(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7696(%rbp)
+	movdqa	-7712(%rbp), %xmm0
+	movaps	%xmm0, -4944(%rbp)
+	movdqa	-9696(%rbp), %xmm0
+	movaps	%xmm0, -4928(%rbp)
+	movdqa	-4928(%rbp), %xmm0
+	movdqa	-4944(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7680(%rbp)
+	movdqa	-7728(%rbp), %xmm0
+	movaps	%xmm0, -4976(%rbp)
+	movdqa	-9680(%rbp), %xmm0
+	movaps	%xmm0, -4960(%rbp)
+	movdqa	-4960(%rbp), %xmm0
+	movdqa	-4976(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7664(%rbp)
+	movdqa	-7712(%rbp), %xmm0
+	movaps	%xmm0, -5008(%rbp)
+	movdqa	-9680(%rbp), %xmm0
+	movaps	%xmm0, -4992(%rbp)
+	movdqa	-4992(%rbp), %xmm0
+	movdqa	-5008(%rbp), %xmm1
+	pmaddwd	%xmm1, %xmm0
+	movaps	%xmm0, -7648(%rbp)
+	movdqa	-7920(%rbp), %xmm0
+	movaps	%xmm0, -5040(%rbp)
+	movdqa	-7696(%rbp), %xmm0
+	movaps	%xmm0, -5024(%rbp)
+	movdqa	-5040(%rbp), %xmm1
+	movdqa	-5024(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7632(%rbp)
+	movdqa	-7904(%rbp), %xmm0
+	movaps	%xmm0, -5072(%rbp)
+	movdqa	-7680(%rbp), %xmm0
+	movaps	%xmm0, -5056(%rbp)
+	movdqa	-5072(%rbp), %xmm1
+	movdqa	-5056(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7616(%rbp)
+	movdqa	-7824(%rbp), %xmm0
+	movaps	%xmm0, -5104(%rbp)
+	movdqa	-7664(%rbp), %xmm0
+	movaps	%xmm0, -5088(%rbp)
+	movdqa	-5104(%rbp), %xmm1
+	movdqa	-5088(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7600(%rbp)
+	movdqa	-7808(%rbp), %xmm0
+	movaps	%xmm0, -5136(%rbp)
+	movdqa	-7648(%rbp), %xmm0
+	movaps	%xmm0, -5120(%rbp)
+	movdqa	-5136(%rbp), %xmm1
+	movdqa	-5120(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7584(%rbp)
+	movdqa	-7888(%rbp), %xmm0
+	movaps	%xmm0, -5168(%rbp)
+	movdqa	-7664(%rbp), %xmm0
+	movaps	%xmm0, -5152(%rbp)
+	movdqa	-5168(%rbp), %xmm1
+	movdqa	-5152(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7568(%rbp)
+	movdqa	-7872(%rbp), %xmm0
+	movaps	%xmm0, -5200(%rbp)
+	movdqa	-7648(%rbp), %xmm0
+	movaps	%xmm0, -5184(%rbp)
+	movdqa	-5200(%rbp), %xmm1
+	movdqa	-5184(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7552(%rbp)
+	movdqa	-7792(%rbp), %xmm0
+	movaps	%xmm0, -5232(%rbp)
+	movdqa	-7696(%rbp), %xmm0
+	movaps	%xmm0, -5216(%rbp)
+	movdqa	-5232(%rbp), %xmm1
+	movdqa	-5216(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7536(%rbp)
+	movdqa	-7776(%rbp), %xmm0
+	movaps	%xmm0, -5264(%rbp)
+	movdqa	-7680(%rbp), %xmm0
+	movaps	%xmm0, -5248(%rbp)
+	movdqa	-5264(%rbp), %xmm1
+	movdqa	-5248(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7520(%rbp)
+	movdqa	-8080(%rbp), %xmm0
+	movaps	%xmm0, -5296(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -5280(%rbp)
+	movdqa	-5296(%rbp), %xmm1
+	movdqa	-5280(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7504(%rbp)
+	movdqa	-8064(%rbp), %xmm0
+	movaps	%xmm0, -5328(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -5312(%rbp)
+	movdqa	-5328(%rbp), %xmm1
+	movdqa	-5312(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7488(%rbp)
+	movdqa	-7504(%rbp), %xmm0
+	movaps	%xmm0, -5360(%rbp)
+	movdqa	-7536(%rbp), %xmm0
+	movaps	%xmm0, -5344(%rbp)
+	movdqa	-5360(%rbp), %xmm1
+	movdqa	-5344(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7472(%rbp)
+	movdqa	-7488(%rbp), %xmm0
+	movaps	%xmm0, -5392(%rbp)
+	movdqa	-7520(%rbp), %xmm0
+	movaps	%xmm0, -5376(%rbp)
+	movdqa	-5392(%rbp), %xmm1
+	movdqa	-5376(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7456(%rbp)
+	movdqa	-7504(%rbp), %xmm0
+	movaps	%xmm0, -5424(%rbp)
+	movdqa	-7536(%rbp), %xmm0
+	movaps	%xmm0, -5408(%rbp)
+	movdqa	-5424(%rbp), %xmm0
+	movdqa	-5408(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7440(%rbp)
+	movdqa	-7488(%rbp), %xmm0
+	movaps	%xmm0, -5456(%rbp)
+	movdqa	-7520(%rbp), %xmm0
+	movaps	%xmm0, -5440(%rbp)
+	movdqa	-5456(%rbp), %xmm0
+	movdqa	-5440(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7424(%rbp)
+	movdqa	-7456(%rbp), %xmm0
+	movaps	%xmm0, -5472(%rbp)
+	movl	$17, -10012(%rbp)
+	movdqa	-5472(%rbp), %xmm1
+	movd	-10012(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7472(%rbp), %xmm0
+	movaps	%xmm0, -5488(%rbp)
+	movl	$17, -10016(%rbp)
+	movdqa	-5488(%rbp), %xmm1
+	movd	-10016(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -5520(%rbp)
+	movaps	%xmm2, -5504(%rbp)
+	movdqa	-5504(%rbp), %xmm1
+	movdqa	-5520(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9568(%rbp)
+	movdqa	-7424(%rbp), %xmm0
+	movaps	%xmm0, -5536(%rbp)
+	movl	$17, -10020(%rbp)
+	movdqa	-5536(%rbp), %xmm1
+	movd	-10020(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7440(%rbp), %xmm0
+	movaps	%xmm0, -5552(%rbp)
+	movl	$17, -10024(%rbp)
+	movdqa	-5552(%rbp), %xmm1
+	movd	-10024(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -5584(%rbp)
+	movaps	%xmm2, -5568(%rbp)
+	movdqa	-5568(%rbp), %xmm1
+	movdqa	-5584(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9456(%rbp)
+	movdqa	-8016(%rbp), %xmm0
+	movaps	%xmm0, -5616(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -5600(%rbp)
+	movdqa	-5616(%rbp), %xmm1
+	movdqa	-5600(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7408(%rbp)
+	movdqa	-8000(%rbp), %xmm0
+	movaps	%xmm0, -5648(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -5632(%rbp)
+	movdqa	-5648(%rbp), %xmm1
+	movdqa	-5632(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7392(%rbp)
+	movdqa	-7408(%rbp), %xmm0
+	movaps	%xmm0, -5680(%rbp)
+	movdqa	-7568(%rbp), %xmm0
+	movaps	%xmm0, -5664(%rbp)
+	movdqa	-5680(%rbp), %xmm1
+	movdqa	-5664(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7376(%rbp)
+	movdqa	-7392(%rbp), %xmm0
+	movaps	%xmm0, -5712(%rbp)
+	movdqa	-7552(%rbp), %xmm0
+	movaps	%xmm0, -5696(%rbp)
+	movdqa	-5712(%rbp), %xmm1
+	movdqa	-5696(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7360(%rbp)
+	movdqa	-7408(%rbp), %xmm0
+	movaps	%xmm0, -5744(%rbp)
+	movdqa	-7568(%rbp), %xmm0
+	movaps	%xmm0, -5728(%rbp)
+	movdqa	-5744(%rbp), %xmm0
+	movdqa	-5728(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7344(%rbp)
+	movdqa	-7392(%rbp), %xmm0
+	movaps	%xmm0, -5776(%rbp)
+	movdqa	-7552(%rbp), %xmm0
+	movaps	%xmm0, -5760(%rbp)
+	movdqa	-5776(%rbp), %xmm0
+	movdqa	-5760(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7328(%rbp)
+	movdqa	-7360(%rbp), %xmm0
+	movaps	%xmm0, -5792(%rbp)
+	movl	$17, -10028(%rbp)
+	movdqa	-5792(%rbp), %xmm1
+	movd	-10028(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7376(%rbp), %xmm0
+	movaps	%xmm0, -5808(%rbp)
+	movl	$17, -10032(%rbp)
+	movdqa	-5808(%rbp), %xmm1
+	movd	-10032(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -5840(%rbp)
+	movaps	%xmm2, -5824(%rbp)
+	movdqa	-5824(%rbp), %xmm1
+	movdqa	-5840(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9552(%rbp)
+	movdqa	-7328(%rbp), %xmm0
+	movaps	%xmm0, -5856(%rbp)
+	movl	$17, -10036(%rbp)
+	movdqa	-5856(%rbp), %xmm1
+	movd	-10036(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7344(%rbp), %xmm0
+	movaps	%xmm0, -5872(%rbp)
+	movl	$17, -10040(%rbp)
+	movdqa	-5872(%rbp), %xmm1
+	movd	-10040(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -5904(%rbp)
+	movaps	%xmm2, -5888(%rbp)
+	movdqa	-5888(%rbp), %xmm1
+	movdqa	-5904(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9472(%rbp)
+	movdqa	-7984(%rbp), %xmm0
+	movaps	%xmm0, -5936(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -5920(%rbp)
+	movdqa	-5936(%rbp), %xmm1
+	movdqa	-5920(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7312(%rbp)
+	movdqa	-7968(%rbp), %xmm0
+	movaps	%xmm0, -5968(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -5952(%rbp)
+	movdqa	-5968(%rbp), %xmm1
+	movdqa	-5952(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7296(%rbp)
+	movdqa	-7312(%rbp), %xmm0
+	movaps	%xmm0, -6000(%rbp)
+	movdqa	-7600(%rbp), %xmm0
+	movaps	%xmm0, -5984(%rbp)
+	movdqa	-6000(%rbp), %xmm1
+	movdqa	-5984(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7280(%rbp)
+	movdqa	-7296(%rbp), %xmm0
+	movaps	%xmm0, -6032(%rbp)
+	movdqa	-7584(%rbp), %xmm0
+	movaps	%xmm0, -6016(%rbp)
+	movdqa	-6032(%rbp), %xmm1
+	movdqa	-6016(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7264(%rbp)
+	movdqa	-7312(%rbp), %xmm0
+	movaps	%xmm0, -6064(%rbp)
+	movdqa	-7600(%rbp), %xmm0
+	movaps	%xmm0, -6048(%rbp)
+	movdqa	-6064(%rbp), %xmm0
+	movdqa	-6048(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7248(%rbp)
+	movdqa	-7296(%rbp), %xmm0
+	movaps	%xmm0, -6096(%rbp)
+	movdqa	-7584(%rbp), %xmm0
+	movaps	%xmm0, -6080(%rbp)
+	movdqa	-6096(%rbp), %xmm0
+	movdqa	-6080(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7232(%rbp)
+	movdqa	-7264(%rbp), %xmm0
+	movaps	%xmm0, -6112(%rbp)
+	movl	$17, -10044(%rbp)
+	movdqa	-6112(%rbp), %xmm1
+	movd	-10044(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7280(%rbp), %xmm0
+	movaps	%xmm0, -6128(%rbp)
+	movl	$17, -10048(%rbp)
+	movdqa	-6128(%rbp), %xmm1
+	movd	-10048(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -6160(%rbp)
+	movaps	%xmm2, -6144(%rbp)
+	movdqa	-6144(%rbp), %xmm1
+	movdqa	-6160(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9536(%rbp)
+	movdqa	-7232(%rbp), %xmm0
+	movaps	%xmm0, -6176(%rbp)
+	movl	$17, -10052(%rbp)
+	movdqa	-6176(%rbp), %xmm1
+	movd	-10052(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7248(%rbp), %xmm0
+	movaps	%xmm0, -6192(%rbp)
+	movl	$17, -10056(%rbp)
+	movdqa	-6192(%rbp), %xmm1
+	movd	-10056(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -6224(%rbp)
+	movaps	%xmm2, -6208(%rbp)
+	movdqa	-6208(%rbp), %xmm1
+	movdqa	-6224(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9488(%rbp)
+	movdqa	-8048(%rbp), %xmm0
+	movaps	%xmm0, -6256(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -6240(%rbp)
+	movdqa	-6256(%rbp), %xmm1
+	movdqa	-6240(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7216(%rbp)
+	movdqa	-8032(%rbp), %xmm0
+	movaps	%xmm0, -6288(%rbp)
+	movdqa	-9584(%rbp), %xmm0
+	movaps	%xmm0, -6272(%rbp)
+	movdqa	-6288(%rbp), %xmm1
+	movdqa	-6272(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7200(%rbp)
+	movdqa	-7216(%rbp), %xmm0
+	movaps	%xmm0, -6320(%rbp)
+	movdqa	-7632(%rbp), %xmm0
+	movaps	%xmm0, -6304(%rbp)
+	movdqa	-6320(%rbp), %xmm1
+	movdqa	-6304(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7184(%rbp)
+	movdqa	-7200(%rbp), %xmm0
+	movaps	%xmm0, -6352(%rbp)
+	movdqa	-7616(%rbp), %xmm0
+	movaps	%xmm0, -6336(%rbp)
+	movdqa	-6352(%rbp), %xmm1
+	movdqa	-6336(%rbp), %xmm0
+	paddd	%xmm1, %xmm0
+	movaps	%xmm0, -7168(%rbp)
+	movdqa	-7216(%rbp), %xmm0
+	movaps	%xmm0, -6384(%rbp)
+	movdqa	-7632(%rbp), %xmm0
+	movaps	%xmm0, -6368(%rbp)
+	movdqa	-6384(%rbp), %xmm0
+	movdqa	-6368(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7152(%rbp)
+	movdqa	-7200(%rbp), %xmm0
+	movaps	%xmm0, -6416(%rbp)
+	movdqa	-7616(%rbp), %xmm0
+	movaps	%xmm0, -6400(%rbp)
+	movdqa	-6416(%rbp), %xmm0
+	movdqa	-6400(%rbp), %xmm1
+	psubd	%xmm1, %xmm0
+	movaps	%xmm0, -7136(%rbp)
+	movdqa	-7168(%rbp), %xmm0
+	movaps	%xmm0, -6432(%rbp)
+	movl	$17, -10060(%rbp)
+	movdqa	-6432(%rbp), %xmm1
+	movd	-10060(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7184(%rbp), %xmm0
+	movaps	%xmm0, -6448(%rbp)
+	movl	$17, -10064(%rbp)
+	movdqa	-6448(%rbp), %xmm1
+	movd	-10064(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -6480(%rbp)
+	movaps	%xmm2, -6464(%rbp)
+	movdqa	-6464(%rbp), %xmm1
+	movdqa	-6480(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9520(%rbp)
+	movdqa	-7136(%rbp), %xmm0
+	movaps	%xmm0, -6496(%rbp)
+	movl	$17, -10068(%rbp)
+	movdqa	-6496(%rbp), %xmm1
+	movd	-10068(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm2
+	movdqa	-7152(%rbp), %xmm0
+	movaps	%xmm0, -6512(%rbp)
+	movl	$17, -10072(%rbp)
+	movdqa	-6512(%rbp), %xmm1
+	movd	-10072(%rbp), %xmm0
+	psrad	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -6544(%rbp)
+	movaps	%xmm2, -6528(%rbp)
+	movdqa	-6528(%rbp), %xmm1
+	movdqa	-6544(%rbp), %xmm0
+	packssdw	%xmm1, %xmm0
+	movaps	%xmm0, -9504(%rbp)
+	movdqa	-9568(%rbp), %xmm0
+	movaps	%xmm0, -6576(%rbp)
+	movdqa	-9552(%rbp), %xmm0
+	movaps	%xmm0, -6560(%rbp)
+	movdqa	-6560(%rbp), %xmm1
+	movdqa	-6576(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -7120(%rbp)
+	movdqa	-9536(%rbp), %xmm0
+	movaps	%xmm0, -6608(%rbp)
+	movdqa	-9520(%rbp), %xmm0
+	movaps	%xmm0, -6592(%rbp)
+	movdqa	-6592(%rbp), %xmm1
+	movdqa	-6608(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -7104(%rbp)
+	movdqa	-9504(%rbp), %xmm0
+	movaps	%xmm0, -6640(%rbp)
+	movdqa	-9488(%rbp), %xmm0
+	movaps	%xmm0, -6624(%rbp)
+	movdqa	-6624(%rbp), %xmm1
+	movdqa	-6640(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -7088(%rbp)
+	movdqa	-9472(%rbp), %xmm0
+	movaps	%xmm0, -6672(%rbp)
+	movdqa	-9456(%rbp), %xmm0
+	movaps	%xmm0, -6656(%rbp)
+	movdqa	-6656(%rbp), %xmm1
+	movdqa	-6672(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -7072(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -6704(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -6688(%rbp)
+	movdqa	-6688(%rbp), %xmm1
+	movdqa	-6704(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -7120(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -6736(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -6720(%rbp)
+	movdqa	-6720(%rbp), %xmm1
+	movdqa	-6736(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -7088(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -6768(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -6752(%rbp)
+	movdqa	-6752(%rbp), %xmm1
+	movdqa	-6768(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -7104(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -6800(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -6784(%rbp)
+	movdqa	-6784(%rbp), %xmm1
+	movdqa	-6800(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -7072(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -6832(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -6816(%rbp)
+	movdqa	-6816(%rbp), %xmm1
+	movdqa	-6832(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -7120(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -6864(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -6848(%rbp)
+	movdqa	-6848(%rbp), %xmm1
+	movdqa	-6864(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -7104(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -6896(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -6880(%rbp)
+	movdqa	-6880(%rbp), %xmm1
+	movdqa	-6896(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -7088(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -6928(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -6912(%rbp)
+	movdqa	-6912(%rbp), %xmm1
+	movdqa	-6928(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -7072(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -6960(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -6944(%rbp)
+	movdqa	-6944(%rbp), %xmm1
+	movdqa	-6960(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -7120(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -6992(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -6976(%rbp)
+	movdqa	-6976(%rbp), %xmm1
+	movdqa	-6992(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -7088(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -8288(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -7024(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -7008(%rbp)
+	movdqa	-7008(%rbp), %xmm1
+	movdqa	-7024(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -7104(%rbp)
+	movdqa	-8288(%rbp), %xmm0
+	movaps	%xmm0, -7056(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -7040(%rbp)
+	movdqa	-7040(%rbp), %xmm1
+	movdqa	-7056(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -7072(%rbp)
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9816(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9816(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movdqa	-7120(%rbp), %xmm0
+	pshufd	$78, %xmm0, %xmm0
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9824(%rbp)
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9824(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9832(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9832(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movdqa	-7088(%rbp), %xmm0
+	pshufd	$78, %xmm0, %xmm0
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9840(%rbp)
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9840(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9848(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9848(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movdqa	-7104(%rbp), %xmm0
+	pshufd	$78, %xmm0, %xmm0
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9856(%rbp)
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9856(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9864(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9864(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	movl	-10348(%rbp), %eax
+	cltq
+	addq	%rax, -10344(%rbp)
+	movdqa	-7072(%rbp), %xmm0
+	pshufd	$78, %xmm0, %xmm0
+	movq	-10344(%rbp), %rax
+	movq	%rax, -9872(%rbp)
+	movaps	%xmm0, -9744(%rbp)
+	movq	-9744(%rbp), %rax
+	movq	%rax, %rdx
+	movq	-9872(%rbp), %rax
+	movq	%rdx, (%rax)
+	nop
+	nop
+	movq	-8(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L781
+	call	__stack_chk_fail@PLT
+.L781:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4961:
+	.size	stbi__idct_simd, .-stbi__idct_simd
+	.type	stbi__get_marker, @function
+stbi__get_marker:
+.LFB4962:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-1, %al
+	je	.L783
+	movq	-24(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	movb	%al, -1(%rbp)
+	movq	-24(%rbp), %rax
+	movb	$-1, 18472(%rax)
+	movzbl	-1(%rbp), %eax
+	jmp	.L784
+.L783:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1(%rbp)
+	cmpb	$-1, -1(%rbp)
+	je	.L786
+	movl	$-1, %eax
+	jmp	.L784
+.L787:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1(%rbp)
+.L786:
+	cmpb	$-1, -1(%rbp)
+	je	.L787
+	movzbl	-1(%rbp), %eax
+.L784:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4962:
+	.size	stbi__get_marker, .-stbi__get_marker
+	.type	stbi__jpeg_reset, @function
+stbi__jpeg_reset:
+.LFB4963:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movl	$0, 18468(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 18464(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 18476(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 18392(%rax)
+	movq	-8(%rbp), %rax
+	movl	18392(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 18296(%rax)
+	movq	-8(%rbp), %rax
+	movl	18296(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 18200(%rax)
+	movq	-8(%rbp), %rax
+	movl	18200(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 18104(%rax)
+	movq	-8(%rbp), %rax
+	movb	$-1, 18472(%rax)
+	movq	-8(%rbp), %rax
+	movl	18536(%rax), %eax
+	testl	%eax, %eax
+	je	.L789
+	movq	-8(%rbp), %rax
+	movl	18536(%rax), %eax
+	jmp	.L790
+.L789:
+	movl	$2147483647, %eax
+.L790:
+	movq	-8(%rbp), %rdx
+	movl	%eax, 18540(%rdx)
+	movq	-8(%rbp), %rax
+	movl	$0, 18500(%rax)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4963:
+	.size	stbi__jpeg_reset, .-stbi__jpeg_reset
+	.type	stbi__parse_entropy_coded_data, @function
+stbi__parse_entropy_coded_data:
+.LFB4964:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$304, %rsp
+	movq	%rdi, -296(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_reset
+	movq	-296(%rbp), %rax
+	movl	18480(%rax), %eax
+	testl	%eax, %eax
+	jne	.L792
+	movq	-296(%rbp), %rax
+	movl	18516(%rax), %eax
+	cmpl	$1, %eax
+	jne	.L793
+	movq	-296(%rbp), %rax
+	movl	18520(%rax), %eax
+	movl	%eax, -176(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18108, %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	movl	%eax, -172(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18112, %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	movl	%eax, -168(%rbp)
+	movl	$0, -272(%rbp)
+	jmp	.L794
+.L803:
+	movl	$0, -276(%rbp)
+	jmp	.L795
+.L802:
+	movq	-296(%rbp), %rcx
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18100, %rax
+	movl	(%rax), %eax
+	movl	%eax, -164(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18092, %rax
+	movl	(%rax), %eax
+	cltq
+	addq	$105, %rax
+	salq	$7, %rax
+	movq	%rax, %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rdi
+	movl	-164(%rbp), %eax
+	cltq
+	salq	$10, %rax
+	leaq	13952(%rax), %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %r10
+	movq	-296(%rbp), %rax
+	leaq	6728(%rax), %rdx
+	movl	-164(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	(%rdx,%rax), %rcx
+	movq	-296(%rbp), %rax
+	leaq	8(%rax), %r8
+	movq	-296(%rbp), %rsi
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18096, %rax
+	movl	(%rax), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	(%r8,%rax), %rdx
+	movl	-176(%rbp), %r8d
+	leaq	-144(%rbp), %rsi
+	movq	-296(%rbp), %rax
+	subq	$8, %rsp
+	pushq	%rdi
+	movl	%r8d, %r9d
+	movq	%r10, %r8
+	movq	%rax, %rdi
+	call	stbi__jpeg_decode_block
+	addq	$16, %rsp
+	testl	%eax, %eax
+	jne	.L796
+	movl	$0, %eax
+	jmp	.L804
+.L796:
+	movq	-296(%rbp), %rax
+	movq	18544(%rax), %r8
+	movq	-296(%rbp), %rcx
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18116, %rax
+	movl	(%rax), %ecx
+	movq	-296(%rbp), %rsi
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18128, %rax
+	movq	(%rax), %rsi
+	movq	-296(%rbp), %rdi
+	movl	-176(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rdi, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	imull	-272(%rbp), %eax
+	sall	$3, %eax
+	movslq	%eax, %rdx
+	movl	-276(%rbp), %eax
+	sall	$3, %eax
+	cltq
+	addq	%rdx, %rax
+	leaq	(%rsi,%rax), %rdi
+	leaq	-144(%rbp), %rax
+	movq	%rax, %rdx
+	movl	%ecx, %esi
+	call	*%r8
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-296(%rbp), %rax
+	movl	%edx, 18540(%rax)
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	testl	%eax, %eax
+	jg	.L798
+	movq	-296(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$23, %eax
+	jg	.L799
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L799:
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-49, %al
+	jbe	.L800
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-41, %al
+	jbe	.L801
+.L800:
+	movl	$1, %eax
+	jmp	.L804
+.L801:
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_reset
+.L798:
+	addl	$1, -276(%rbp)
+.L795:
+	movl	-276(%rbp), %eax
+	cmpl	-172(%rbp), %eax
+	jl	.L802
+	addl	$1, -272(%rbp)
+.L794:
+	movl	-272(%rbp), %eax
+	cmpl	-168(%rbp), %eax
+	jl	.L803
+	movl	$1, %eax
+	jmp	.L804
+.L793:
+	movl	$0, -264(%rbp)
+	jmp	.L805
+.L820:
+	movl	$0, -268(%rbp)
+	jmp	.L806
+.L819:
+	movl	$0, -260(%rbp)
+	jmp	.L807
+.L814:
+	movq	-296(%rbp), %rax
+	movl	-260(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$4628, %rdx
+	movl	8(%rax,%rdx,4), %eax
+	movl	%eax, -192(%rbp)
+	movl	$0, -252(%rbp)
+	jmp	.L808
+.L813:
+	movl	$0, -256(%rbp)
+	jmp	.L809
+.L812:
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	imull	-268(%rbp), %eax
+	movl	%eax, %edx
+	movl	-256(%rbp), %eax
+	addl	%edx, %eax
+	sall	$3, %eax
+	movl	%eax, -188(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	imull	-264(%rbp), %eax
+	movl	%eax, %edx
+	movl	-252(%rbp), %eax
+	addl	%edx, %eax
+	sall	$3, %eax
+	movl	%eax, -184(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18100, %rax
+	movl	(%rax), %eax
+	movl	%eax, -180(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18092, %rax
+	movl	(%rax), %eax
+	cltq
+	addq	$105, %rax
+	salq	$7, %rax
+	movq	%rax, %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rdi
+	movl	-180(%rbp), %eax
+	cltq
+	salq	$10, %rax
+	leaq	13952(%rax), %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %r10
+	movq	-296(%rbp), %rax
+	leaq	6728(%rax), %rdx
+	movl	-180(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	(%rdx,%rax), %rcx
+	movq	-296(%rbp), %rax
+	leaq	8(%rax), %r8
+	movq	-296(%rbp), %rsi
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18096, %rax
+	movl	(%rax), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	(%r8,%rax), %rdx
+	movl	-192(%rbp), %r8d
+	leaq	-144(%rbp), %rsi
+	movq	-296(%rbp), %rax
+	subq	$8, %rsp
+	pushq	%rdi
+	movl	%r8d, %r9d
+	movq	%r10, %r8
+	movq	%rax, %rdi
+	call	stbi__jpeg_decode_block
+	addq	$16, %rsp
+	testl	%eax, %eax
+	jne	.L810
+	movl	$0, %eax
+	jmp	.L804
+.L810:
+	movq	-296(%rbp), %rax
+	movq	18544(%rax), %r8
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18116, %rax
+	movl	(%rax), %ecx
+	movq	-296(%rbp), %rsi
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18128, %rax
+	movq	(%rax), %rsi
+	movq	-296(%rbp), %rdi
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rdi, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	imull	-184(%rbp), %eax
+	movslq	%eax, %rdx
+	movl	-188(%rbp), %eax
+	cltq
+	addq	%rdx, %rax
+	leaq	(%rsi,%rax), %rdi
+	leaq	-144(%rbp), %rax
+	movq	%rax, %rdx
+	movl	%ecx, %esi
+	call	*%r8
+	addl	$1, -256(%rbp)
+.L809:
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -256(%rbp)
+	jl	.L812
+	addl	$1, -252(%rbp)
+.L808:
+	movq	-296(%rbp), %rcx
+	movl	-192(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -252(%rbp)
+	jl	.L813
+	addl	$1, -260(%rbp)
+.L807:
+	movq	-296(%rbp), %rax
+	movl	18516(%rax), %eax
+	cmpl	%eax, -260(%rbp)
+	jl	.L814
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-296(%rbp), %rax
+	movl	%edx, 18540(%rax)
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	testl	%eax, %eax
+	jg	.L815
+	movq	-296(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$23, %eax
+	jg	.L816
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L816:
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-49, %al
+	jbe	.L817
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-41, %al
+	jbe	.L818
+.L817:
+	movl	$1, %eax
+	jmp	.L804
+.L818:
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_reset
+.L815:
+	addl	$1, -268(%rbp)
+.L806:
+	movq	-296(%rbp), %rax
+	movl	18064(%rax), %eax
+	cmpl	%eax, -268(%rbp)
+	jl	.L819
+	addl	$1, -264(%rbp)
+.L805:
+	movq	-296(%rbp), %rax
+	movl	18068(%rax), %eax
+	cmpl	%eax, -264(%rbp)
+	jl	.L820
+	movl	$1, %eax
+	jmp	.L804
+.L792:
+	movq	-296(%rbp), %rax
+	movl	18516(%rax), %eax
+	cmpl	$1, %eax
+	jne	.L821
+	movq	-296(%rbp), %rax
+	movl	18520(%rax), %eax
+	movl	%eax, -208(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-208(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18108, %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	movl	%eax, -204(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-208(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18112, %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	movl	%eax, -200(%rbp)
+	movl	$0, -244(%rbp)
+	jmp	.L822
+.L831:
+	movl	$0, -248(%rbp)
+	jmp	.L823
+.L830:
+	movq	-296(%rbp), %rcx
+	movl	-208(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18160, %rax
+	movq	(%rax), %rcx
+	movq	-296(%rbp), %rsi
+	movl	-208(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18168, %rax
+	movl	(%rax), %eax
+	imull	-244(%rbp), %eax
+	movl	%eax, %edx
+	movl	-248(%rbp), %eax
+	addl	%edx, %eax
+	sall	$6, %eax
+	cltq
+	addq	%rax, %rax
+	addq	%rcx, %rax
+	movq	%rax, -152(%rbp)
+	movq	-296(%rbp), %rax
+	movl	18484(%rax), %eax
+	testl	%eax, %eax
+	jne	.L824
+	movq	-296(%rbp), %rcx
+	movl	-208(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18096, %rax
+	movl	(%rax), %eax
+	cltq
+	imulq	$1680, %rax, %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rdi
+	movl	-208(%rbp), %edx
+	movq	-152(%rbp), %rsi
+	movq	-296(%rbp), %rax
+	movl	%edx, %ecx
+	movq	%rdi, %rdx
+	movq	%rax, %rdi
+	call	stbi__jpeg_decode_block_prog_dc
+	testl	%eax, %eax
+	jne	.L825
+	movl	$0, %eax
+	jmp	.L804
+.L824:
+	movq	-296(%rbp), %rcx
+	movl	-208(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18100, %rax
+	movl	(%rax), %eax
+	movl	%eax, -196(%rbp)
+	movl	-196(%rbp), %eax
+	cltq
+	salq	$10, %rax
+	leaq	13952(%rax), %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rcx
+	movl	-196(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	6720(%rax), %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rdx
+	movq	-152(%rbp), %rsi
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_decode_block_prog_ac
+	testl	%eax, %eax
+	jne	.L825
+	movl	$0, %eax
+	jmp	.L804
+.L825:
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-296(%rbp), %rax
+	movl	%edx, 18540(%rax)
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	testl	%eax, %eax
+	jg	.L826
+	movq	-296(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$23, %eax
+	jg	.L827
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L827:
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-49, %al
+	jbe	.L828
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-41, %al
+	jbe	.L829
+.L828:
+	movl	$1, %eax
+	jmp	.L804
+.L829:
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_reset
+.L826:
+	addl	$1, -248(%rbp)
+.L823:
+	movl	-248(%rbp), %eax
+	cmpl	-204(%rbp), %eax
+	jl	.L830
+	addl	$1, -244(%rbp)
+.L822:
+	movl	-244(%rbp), %eax
+	cmpl	-200(%rbp), %eax
+	jl	.L831
+	movl	$1, %eax
+	jmp	.L804
+.L821:
+	movl	$0, -236(%rbp)
+	jmp	.L832
+.L846:
+	movl	$0, -240(%rbp)
+	jmp	.L833
+.L845:
+	movl	$0, -232(%rbp)
+	jmp	.L834
+.L840:
+	movq	-296(%rbp), %rax
+	movl	-232(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$4628, %rdx
+	movl	8(%rax,%rdx,4), %eax
+	movl	%eax, -220(%rbp)
+	movl	$0, -224(%rbp)
+	jmp	.L835
+.L839:
+	movl	$0, -228(%rbp)
+	jmp	.L836
+.L838:
+	movq	-296(%rbp), %rcx
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	imull	-240(%rbp), %eax
+	movl	%eax, %edx
+	movl	-228(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -216(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	imull	-236(%rbp), %eax
+	movl	%eax, %edx
+	movl	-224(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -212(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18160, %rax
+	movq	(%rax), %rcx
+	movq	-296(%rbp), %rsi
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18168, %rax
+	movl	(%rax), %eax
+	imull	-212(%rbp), %eax
+	movl	%eax, %edx
+	movl	-216(%rbp), %eax
+	addl	%edx, %eax
+	sall	$6, %eax
+	cltq
+	addq	%rax, %rax
+	addq	%rcx, %rax
+	movq	%rax, -160(%rbp)
+	movq	-296(%rbp), %rcx
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18096, %rax
+	movl	(%rax), %eax
+	cltq
+	imulq	$1680, %rax, %rdx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rdi
+	movl	-220(%rbp), %edx
+	movq	-160(%rbp), %rsi
+	movq	-296(%rbp), %rax
+	movl	%edx, %ecx
+	movq	%rdi, %rdx
+	movq	%rax, %rdi
+	call	stbi__jpeg_decode_block_prog_dc
+	testl	%eax, %eax
+	jne	.L837
+	movl	$0, %eax
+	jmp	.L804
+.L837:
+	addl	$1, -228(%rbp)
+.L836:
+	movq	-296(%rbp), %rcx
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -228(%rbp)
+	jl	.L838
+	addl	$1, -224(%rbp)
+.L835:
+	movq	-296(%rbp), %rcx
+	movl	-220(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -224(%rbp)
+	jl	.L839
+	addl	$1, -232(%rbp)
+.L834:
+	movq	-296(%rbp), %rax
+	movl	18516(%rax), %eax
+	cmpl	%eax, -232(%rbp)
+	jl	.L840
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-296(%rbp), %rax
+	movl	%edx, 18540(%rax)
+	movq	-296(%rbp), %rax
+	movl	18540(%rax), %eax
+	testl	%eax, %eax
+	jg	.L841
+	movq	-296(%rbp), %rax
+	movl	18468(%rax), %eax
+	cmpl	$23, %eax
+	jg	.L842
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__grow_buffer_unsafe
+.L842:
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-49, %al
+	jbe	.L843
+	movq	-296(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-41, %al
+	jbe	.L844
+.L843:
+	movl	$1, %eax
+	jmp	.L804
+.L844:
+	movq	-296(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_reset
+.L841:
+	addl	$1, -240(%rbp)
+.L833:
+	movq	-296(%rbp), %rax
+	movl	18064(%rax), %eax
+	cmpl	%eax, -240(%rbp)
+	jl	.L845
+	addl	$1, -236(%rbp)
+.L832:
+	movq	-296(%rbp), %rax
+	movl	18068(%rax), %eax
+	cmpl	%eax, -236(%rbp)
+	jl	.L846
+	movl	$1, %eax
+.L804:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L847
+	call	__stack_chk_fail@PLT
+.L847:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4964:
+	.size	stbi__parse_entropy_coded_data, .-stbi__parse_entropy_coded_data
+	.type	stbi__jpeg_dequantize, @function
+stbi__jpeg_dequantize:
+.LFB4965:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L849
+.L850:
+	movl	-4(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	movl	%eax, %ecx
+	movl	-4(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	imull	%eax, %ecx
+	movl	-4(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movw	%dx, (%rax)
+	addl	$1, -4(%rbp)
+.L849:
+	cmpl	$63, -4(%rbp)
+	jle	.L850
+	nop
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4965:
+	.size	stbi__jpeg_dequantize, .-stbi__jpeg_dequantize
+	.type	stbi__jpeg_finish, @function
+stbi__jpeg_finish:
+.LFB4966:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movl	18480(%rax), %eax
+	testl	%eax, %eax
+	je	.L859
+	movl	$0, -20(%rbp)
+	jmp	.L853
+.L858:
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18108, %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	movl	%eax, -16(%rbp)
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18112, %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	movl	%eax, -12(%rbp)
+	movl	$0, -24(%rbp)
+	jmp	.L854
+.L857:
+	movl	$0, -28(%rbp)
+	jmp	.L855
+.L856:
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18160, %rax
+	movq	(%rax), %rcx
+	movq	-40(%rbp), %rsi
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18168, %rax
+	movl	(%rax), %eax
+	imull	-24(%rbp), %eax
+	movl	%eax, %edx
+	movl	-28(%rbp), %eax
+	addl	%edx, %eax
+	sall	$6, %eax
+	cltq
+	addq	%rax, %rax
+	addq	%rcx, %rax
+	movq	%rax, -8(%rbp)
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18092, %rax
+	movl	(%rax), %eax
+	cltq
+	addq	$105, %rax
+	salq	$7, %rax
+	movq	%rax, %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	8(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__jpeg_dequantize
+	movq	-40(%rbp), %rax
+	movq	18544(%rax), %r8
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18116, %rax
+	movl	(%rax), %ecx
+	movq	-40(%rbp), %rsi
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18128, %rax
+	movq	(%rax), %rsi
+	movq	-40(%rbp), %rdi
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rdi, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	imull	-24(%rbp), %eax
+	sall	$3, %eax
+	movslq	%eax, %rdx
+	movl	-28(%rbp), %eax
+	sall	$3, %eax
+	cltq
+	addq	%rdx, %rax
+	leaq	(%rsi,%rax), %rdi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdx
+	movl	%ecx, %esi
+	call	*%r8
+	addl	$1, -28(%rbp)
+.L855:
+	movl	-28(%rbp), %eax
+	cmpl	-16(%rbp), %eax
+	jl	.L856
+	addl	$1, -24(%rbp)
+.L854:
+	movl	-24(%rbp), %eax
+	cmpl	-12(%rbp), %eax
+	jl	.L857
+	addl	$1, -20(%rbp)
+.L853:
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -20(%rbp)
+	jl	.L858
+.L859:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4966:
+	.size	stbi__jpeg_finish, .-stbi__jpeg_finish
+	.section	.rodata
+.LC15:
+	.string	"expected marker"
+.LC16:
+	.string	"bad DRI len"
+.LC17:
+	.string	"bad DQT type"
+.LC18:
+	.string	"bad DQT table"
+.LC19:
+	.string	"bad DHT header"
+.LC20:
+	.string	"bad COM len"
+.LC21:
+	.string	"bad APP len"
+.LC22:
+	.string	"unknown marker"
+	.text
+	.type	stbi__process_marker, @function
+stbi__process_marker:
+.LFB4967:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$184, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -184(%rbp)
+	movl	%esi, -188(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	cmpl	$255, -188(%rbp)
+	je	.L861
+	cmpl	$255, -188(%rbp)
+	jg	.L862
+	cmpl	$221, -188(%rbp)
+	je	.L863
+	cmpl	$221, -188(%rbp)
+	jg	.L862
+	cmpl	$196, -188(%rbp)
+	je	.L864
+	cmpl	$219, -188(%rbp)
+	je	.L865
+	jmp	.L862
+.L861:
+	leaq	.LC15(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L863:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	cmpl	$4, %eax
+	je	.L867
+	leaq	.LC16(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L867:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movq	-184(%rbp), %rdx
+	movl	%eax, 18536(%rdx)
+	movl	$1, %eax
+	jmp	.L866
+.L865:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	subl	$2, %eax
+	movl	%eax, -164(%rbp)
+	jmp	.L868
+.L877:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -132(%rbp)
+	movl	-132(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, -128(%rbp)
+	cmpl	$0, -128(%rbp)
+	setne	%al
+	movzbl	%al, %eax
+	movl	%eax, -124(%rbp)
+	movl	-132(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -120(%rbp)
+	cmpl	$0, -128(%rbp)
+	je	.L869
+	cmpl	$1, -128(%rbp)
+	je	.L869
+	leaq	.LC17(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L869:
+	cmpl	$3, -120(%rbp)
+	jle	.L870
+	leaq	.LC18(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L870:
+	movl	$0, -160(%rbp)
+	jmp	.L871
+.L874:
+	cmpl	$0, -124(%rbp)
+	je	.L872
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	jmp	.L873
+.L872:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+.L873:
+	movl	-160(%rbp), %edx
+	movslq	%edx, %rdx
+	leaq	stbi__jpeg_dezigzag(%rip), %rcx
+	movzbl	(%rdx,%rcx), %edx
+	movzbl	%dl, %ecx
+	movq	-184(%rbp), %rdx
+	movslq	%ecx, %rcx
+	movl	-120(%rbp), %esi
+	movslq	%esi, %rsi
+	salq	$6, %rsi
+	addq	%rsi, %rcx
+	addq	$6720, %rcx
+	movw	%ax, 8(%rdx,%rcx,2)
+	addl	$1, -160(%rbp)
+.L871:
+	cmpl	$63, -160(%rbp)
+	jle	.L874
+	cmpl	$0, -124(%rbp)
+	je	.L875
+	movl	$129, %eax
+	jmp	.L876
+.L875:
+	movl	$65, %eax
+.L876:
+	subl	%eax, -164(%rbp)
+.L868:
+	cmpl	$0, -164(%rbp)
+	jg	.L877
+	cmpl	$0, -164(%rbp)
+	sete	%al
+	movzbl	%al, %eax
+	jmp	.L866
+.L864:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	subl	$2, %eax
+	movl	%eax, -164(%rbp)
+	jmp	.L878
+.L892:
+	movl	$0, -152(%rbp)
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -116(%rbp)
+	movl	-116(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, -112(%rbp)
+	movl	-116(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -108(%rbp)
+	cmpl	$1, -112(%rbp)
+	jg	.L879
+	cmpl	$3, -108(%rbp)
+	jle	.L880
+.L879:
+	leaq	.LC19(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L880:
+	movl	$0, -156(%rbp)
+	jmp	.L882
+.L883:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movl	-156(%rbp), %eax
+	cltq
+	movl	%edx, -96(%rbp,%rax,4)
+	movl	-156(%rbp), %eax
+	cltq
+	movl	-96(%rbp,%rax,4), %eax
+	addl	%eax, -152(%rbp)
+	addl	$1, -156(%rbp)
+.L882:
+	cmpl	$15, -156(%rbp)
+	jle	.L883
+	cmpl	$256, -152(%rbp)
+	jle	.L884
+	leaq	.LC19(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L884:
+	subl	$17, -164(%rbp)
+	cmpl	$0, -112(%rbp)
+	jne	.L885
+	movq	-184(%rbp), %rax
+	leaq	8(%rax), %rdx
+	movl	-108(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	addq	%rax, %rdx
+	leaq	-96(%rbp), %rax
+	movq	%rax, %rsi
+	movq	%rdx, %rdi
+	call	stbi__build_huffman
+	testl	%eax, %eax
+	jne	.L886
+	movl	$0, %eax
+	jmp	.L866
+.L886:
+	movl	-108(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	1024(%rax), %rdx
+	movq	-184(%rbp), %rax
+	addq	%rdx, %rax
+	addq	$8, %rax
+	movq	%rax, -104(%rbp)
+	jmp	.L887
+.L885:
+	movq	-184(%rbp), %rax
+	leaq	6728(%rax), %rdx
+	movl	-108(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	addq	%rax, %rdx
+	leaq	-96(%rbp), %rax
+	movq	%rax, %rsi
+	movq	%rdx, %rdi
+	call	stbi__build_huffman
+	testl	%eax, %eax
+	jne	.L888
+	movl	$0, %eax
+	jmp	.L866
+.L888:
+	movl	-108(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	leaq	7744(%rax), %rdx
+	movq	-184(%rbp), %rax
+	addq	%rdx, %rax
+	addq	$8, %rax
+	movq	%rax, -104(%rbp)
+.L887:
+	movl	$0, -156(%rbp)
+	jmp	.L889
+.L890:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movl	-156(%rbp), %edx
+	movslq	%edx, %rcx
+	movq	-104(%rbp), %rdx
+	leaq	(%rcx,%rdx), %rbx
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+	addl	$1, -156(%rbp)
+.L889:
+	movl	-156(%rbp), %eax
+	cmpl	-152(%rbp), %eax
+	jl	.L890
+	cmpl	$0, -112(%rbp)
+	je	.L891
+	movq	-184(%rbp), %rax
+	leaq	6728(%rax), %rdx
+	movl	-108(%rbp), %eax
+	cltq
+	imulq	$1680, %rax, %rax
+	addq	%rax, %rdx
+	movl	-108(%rbp), %eax
+	cltq
+	salq	$10, %rax
+	leaq	13952(%rax), %rcx
+	movq	-184(%rbp), %rax
+	addq	%rcx, %rax
+	addq	$8, %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__build_fast_ac
+.L891:
+	movl	-152(%rbp), %eax
+	subl	%eax, -164(%rbp)
+.L878:
+	cmpl	$0, -164(%rbp)
+	jg	.L892
+	cmpl	$0, -164(%rbp)
+	sete	%al
+	movzbl	%al, %eax
+	jmp	.L866
+.L862:
+	cmpl	$223, -188(%rbp)
+	jle	.L893
+	cmpl	$239, -188(%rbp)
+	jle	.L894
+.L893:
+	cmpl	$254, -188(%rbp)
+	jne	.L895
+.L894:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -164(%rbp)
+	cmpl	$1, -164(%rbp)
+	jg	.L896
+	cmpl	$254, -188(%rbp)
+	jne	.L897
+	leaq	.LC20(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L897:
+	leaq	.LC21(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L866
+.L896:
+	subl	$2, -164(%rbp)
+	cmpl	$224, -188(%rbp)
+	jne	.L898
+	cmpl	$4, -164(%rbp)
+	jle	.L898
+	movl	$1, -148(%rbp)
+	movl	$0, -144(%rbp)
+	jmp	.L899
+.L901:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-144(%rbp), %edx
+	movslq	%edx, %rdx
+	leaq	tag.15(%rip), %rcx
+	movzbl	(%rdx,%rcx), %edx
+	cmpb	%dl, %al
+	je	.L900
+	movl	$0, -148(%rbp)
+.L900:
+	addl	$1, -144(%rbp)
+.L899:
+	cmpl	$4, -144(%rbp)
+	jle	.L901
+	subl	$5, -164(%rbp)
+	cmpl	$0, -148(%rbp)
+	je	.L908
+	movq	-184(%rbp), %rax
+	movl	$1, 18504(%rax)
+	jmp	.L908
+.L898:
+	cmpl	$238, -188(%rbp)
+	jne	.L903
+	cmpl	$11, -164(%rbp)
+	jle	.L903
+	movl	$1, -140(%rbp)
+	movl	$0, -136(%rbp)
+	jmp	.L904
+.L906:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-136(%rbp), %edx
+	movslq	%edx, %rdx
+	leaq	tag.14(%rip), %rcx
+	movzbl	(%rdx,%rcx), %edx
+	cmpb	%dl, %al
+	je	.L905
+	movl	$0, -140(%rbp)
+.L905:
+	addl	$1, -136(%rbp)
+.L904:
+	cmpl	$5, -136(%rbp)
+	jle	.L906
+	subl	$6, -164(%rbp)
+	cmpl	$0, -140(%rbp)
+	je	.L903
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-184(%rbp), %rax
+	movl	%edx, 18508(%rax)
+	subl	$6, -164(%rbp)
+	jmp	.L903
+.L908:
+	nop
+.L903:
+	movq	-184(%rbp), %rax
+	movq	(%rax), %rax
+	movl	-164(%rbp), %edx
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	$1, %eax
+	jmp	.L866
+.L895:
+	leaq	.LC22(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+.L866:
+	movq	-24(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L907
+	call	__stack_chk_fail@PLT
+.L907:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4967:
+	.size	stbi__process_marker, .-stbi__process_marker
+	.section	.rodata
+.LC23:
+	.string	"bad SOS component count"
+.LC24:
+	.string	"bad SOS len"
+.LC25:
+	.string	"bad DC huff"
+.LC26:
+	.string	"bad AC huff"
+.LC27:
+	.string	"bad SOS"
+	.text
+	.type	stbi__process_scan_header, @function
+stbi__process_scan_header:
+.LFB4968:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -16(%rbp)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 18516(%rax)
+	movq	-40(%rbp), %rax
+	movl	18516(%rax), %eax
+	testl	%eax, %eax
+	jle	.L910
+	movq	-40(%rbp), %rax
+	movl	18516(%rax), %eax
+	cmpl	$4, %eax
+	jg	.L910
+	movq	-40(%rbp), %rax
+	movl	18516(%rax), %edx
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, %edx
+	jle	.L911
+.L910:
+	leaq	.LC23(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L911:
+	movq	-40(%rbp), %rax
+	movl	18516(%rax), %eax
+	addl	$3, %eax
+	addl	%eax, %eax
+	cmpl	%eax, -16(%rbp)
+	je	.L913
+	leaq	.LC24(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L913:
+	movl	$0, -24(%rbp)
+	jmp	.L914
+.L922:
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -8(%rbp)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movl	$0, -20(%rbp)
+	jmp	.L915
+.L918:
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18080, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -8(%rbp)
+	je	.L929
+	addl	$1, -20(%rbp)
+.L915:
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -20(%rbp)
+	jl	.L918
+	jmp	.L917
+.L929:
+	nop
+.L917:
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -20(%rbp)
+	jne	.L919
+	movl	$0, %eax
+	jmp	.L912
+.L919:
+	movl	-4(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movq	-40(%rbp), %rsi
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18096, %rax
+	movl	%ecx, (%rax)
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18096, %rax
+	movl	(%rax), %eax
+	cmpl	$3, %eax
+	jle	.L920
+	leaq	.LC25(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L920:
+	movl	-4(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, %ecx
+	movq	-40(%rbp), %rsi
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18100, %rax
+	movl	%ecx, (%rax)
+	movq	-40(%rbp), %rcx
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18100, %rax
+	movl	(%rax), %eax
+	cmpl	$3, %eax
+	jle	.L921
+	leaq	.LC26(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L921:
+	movq	-40(%rbp), %rax
+	movl	-24(%rbp), %edx
+	movslq	%edx, %rdx
+	leaq	4628(%rdx), %rcx
+	movl	-20(%rbp), %edx
+	movl	%edx, 8(%rax,%rcx,4)
+	addl	$1, -24(%rbp)
+.L914:
+	movq	-40(%rbp), %rax
+	movl	18516(%rax), %eax
+	cmpl	%eax, -24(%rbp)
+	jl	.L922
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 18484(%rax)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 18488(%rax)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	movl	-12(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 18492(%rax)
+	movl	-12(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 18496(%rax)
+	movq	-40(%rbp), %rax
+	movl	18480(%rax), %eax
+	testl	%eax, %eax
+	je	.L923
+	movq	-40(%rbp), %rax
+	movl	18484(%rax), %eax
+	cmpl	$63, %eax
+	jg	.L924
+	movq	-40(%rbp), %rax
+	movl	18488(%rax), %eax
+	cmpl	$63, %eax
+	jg	.L924
+	movq	-40(%rbp), %rax
+	movl	18484(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	18488(%rax), %eax
+	cmpl	%eax, %edx
+	jg	.L924
+	movq	-40(%rbp), %rax
+	movl	18492(%rax), %eax
+	cmpl	$13, %eax
+	jg	.L924
+	movq	-40(%rbp), %rax
+	movl	18496(%rax), %eax
+	cmpl	$13, %eax
+	jle	.L925
+.L924:
+	leaq	.LC27(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L923:
+	movq	-40(%rbp), %rax
+	movl	18484(%rax), %eax
+	testl	%eax, %eax
+	je	.L926
+	leaq	.LC27(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L926:
+	movq	-40(%rbp), %rax
+	movl	18492(%rax), %eax
+	testl	%eax, %eax
+	jne	.L927
+	movq	-40(%rbp), %rax
+	movl	18496(%rax), %eax
+	testl	%eax, %eax
+	je	.L928
+.L927:
+	leaq	.LC27(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L912
+.L928:
+	movq	-40(%rbp), %rax
+	movl	$63, 18488(%rax)
+.L925:
+	movl	$1, %eax
+.L912:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4968:
+	.size	stbi__process_scan_header, .-stbi__process_scan_header
+	.type	stbi__free_jpeg_components, @function
+stbi__free_jpeg_components:
+.LFB4969:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movl	%edx, -32(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L931
+.L935:
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18136, %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	je	.L932
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18136, %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18136, %rax
+	movq	$0, (%rax)
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18128, %rax
+	movq	$0, (%rax)
+.L932:
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	je	.L933
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	$0, (%rax)
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18160, %rax
+	movq	$0, (%rax)
+.L933:
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18152, %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	je	.L934
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18152, %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rcx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18152, %rax
+	movq	$0, (%rax)
+.L934:
+	addl	$1, -4(%rbp)
+.L931:
+	movl	-4(%rbp), %eax
+	cmpl	-28(%rbp), %eax
+	jl	.L935
+	movl	-32(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4969:
+	.size	stbi__free_jpeg_components, .-stbi__free_jpeg_components
+	.section	.rodata
+.LC28:
+	.string	"bad SOF len"
+.LC29:
+	.string	"only 8-bit"
+.LC30:
+	.string	"no header height"
+.LC31:
+	.string	"0 width"
+.LC32:
+	.string	"too large"
+.LC33:
+	.string	"bad component count"
+.LC34:
+	.string	"bad H"
+.LC35:
+	.string	"bad V"
+.LC36:
+	.string	"bad TQ"
+.LC37:
+	.string	"outofmem"
+	.text
+	.type	stbi__process_frame_header, @function
+stbi__process_frame_header:
+.LFB4970:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -56(%rbp)
+	movl	%esi, -60(%rbp)
+	movq	-56(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, -8(%rbp)
+	movl	$1, -32(%rbp)
+	movl	$1, -28(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -24(%rbp)
+	cmpl	$10, -24(%rbp)
+	jg	.L938
+	leaq	.LC28(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L938:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -20(%rbp)
+	cmpl	$8, -20(%rbp)
+	je	.L940
+	leaq	.LC29(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L940:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 4(%rax)
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %eax
+	testl	%eax, %eax
+	jne	.L941
+	leaq	.LC30(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L941:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-8(%rbp), %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	jne	.L942
+	leaq	.LC31(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L942:
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L943
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L943:
+	movq	-8(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L944
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L944:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -16(%rbp)
+	cmpl	$3, -16(%rbp)
+	je	.L945
+	cmpl	$1, -16(%rbp)
+	je	.L945
+	cmpl	$4, -16(%rbp)
+	je	.L945
+	leaq	.LC33(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L945:
+	movq	-8(%rbp), %rax
+	movl	-16(%rbp), %edx
+	movl	%edx, 8(%rax)
+	movl	$0, -36(%rbp)
+	jmp	.L946
+.L947:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18128, %rax
+	movq	$0, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18152, %rax
+	movq	$0, (%rax)
+	addl	$1, -36(%rbp)
+.L946:
+	movl	-36(%rbp), %eax
+	cmpl	-16(%rbp), %eax
+	jl	.L947
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%edx, %eax
+	addl	$8, %eax
+	cmpl	%eax, -24(%rbp)
+	je	.L948
+	leaq	.LC28(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L948:
+	movq	-56(%rbp), %rax
+	movl	$0, 18512(%rax)
+	movl	$0, -36(%rbp)
+	jmp	.L949
+.L956:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18080, %rax
+	movl	%ecx, (%rax)
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	$3, %eax
+	jne	.L950
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18080, %rax
+	movl	(%rax), %edx
+	movl	-36(%rbp), %eax
+	cltq
+	leaq	rgb.13(%rip), %rcx
+	movzbl	(%rax,%rcx), %eax
+	movzbl	%al, %eax
+	cmpl	%eax, %edx
+	jne	.L950
+	movq	-56(%rbp), %rax
+	movl	18512(%rax), %eax
+	leal	1(%rax), %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18512(%rax)
+.L950:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	movl	-12(%rbp), %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18084, %rax
+	movl	%ecx, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	je	.L951
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	cmpl	$4, %eax
+	jle	.L952
+.L951:
+	leaq	.LC34(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L952:
+	movl	-12(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18088, %rax
+	movl	%ecx, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	je	.L953
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	cmpl	$4, %eax
+	jle	.L954
+.L953:
+	leaq	.LC35(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L954:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18092, %rax
+	movl	%ecx, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18092, %rax
+	movl	(%rax), %eax
+	cmpl	$3, %eax
+	jle	.L955
+	leaq	.LC36(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L955:
+	addl	$1, -36(%rbp)
+.L949:
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -36(%rbp)
+	jl	.L956
+	cmpl	$0, -60(%rbp)
+	je	.L957
+	movl	$1, %eax
+	jmp	.L939
+.L957:
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %eax
+	movq	-8(%rbp), %rdx
+	movl	4(%rdx), %edx
+	movl	%edx, %esi
+	movq	-8(%rbp), %rdx
+	movl	(%rdx), %edx
+	movl	%edx, %edi
+	movl	$0, %ecx
+	movl	%eax, %edx
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L958
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L958:
+	movl	$0, -36(%rbp)
+	jmp	.L959
+.L962:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -32(%rbp)
+	jge	.L960
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	movl	%eax, -32(%rbp)
+.L960:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -28(%rbp)
+	jge	.L961
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	movl	%eax, -28(%rbp)
+.L961:
+	addl	$1, -36(%rbp)
+.L959:
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -36(%rbp)
+	jl	.L962
+	movl	$0, -36(%rbp)
+	jmp	.L963
+.L966:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18084, %rax
+	movl	(%rax), %edi
+	movl	-32(%rbp), %eax
+	cltd
+	idivl	%edi
+	movl	%edx, %ecx
+	movl	%ecx, %eax
+	testl	%eax, %eax
+	je	.L964
+	leaq	.LC34(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L964:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18088, %rax
+	movl	(%rax), %edi
+	movl	-28(%rbp), %eax
+	cltd
+	idivl	%edi
+	movl	%edx, %ecx
+	movl	%ecx, %eax
+	testl	%eax, %eax
+	je	.L965
+	leaq	.LC35(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L939
+.L965:
+	addl	$1, -36(%rbp)
+.L963:
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -36(%rbp)
+	jl	.L966
+	movq	-56(%rbp), %rax
+	movl	-32(%rbp), %edx
+	movl	%edx, 18056(%rax)
+	movq	-56(%rbp), %rax
+	movl	-28(%rbp), %edx
+	movl	%edx, 18060(%rax)
+	movl	-32(%rbp), %eax
+	leal	0(,%rax,8), %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18072(%rax)
+	movl	-28(%rbp), %eax
+	leal	0(,%rax,8), %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18076(%rax)
+	movq	-8(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-56(%rbp), %rax
+	movl	18072(%rax), %eax
+	addl	%edx, %eax
+	leal	-1(%rax), %edx
+	movq	-56(%rbp), %rax
+	movl	18072(%rax), %eax
+	movl	%eax, %ecx
+	movl	%edx, %eax
+	movl	$0, %edx
+	divl	%ecx
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18064(%rax)
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %edx
+	movq	-56(%rbp), %rax
+	movl	18076(%rax), %eax
+	addl	%edx, %eax
+	leal	-1(%rax), %edx
+	movq	-56(%rbp), %rax
+	movl	18076(%rax), %eax
+	movl	%eax, %ecx
+	movl	%edx, %eax
+	movl	$0, %edx
+	divl	%ecx
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 18068(%rax)
+	movl	$0, -36(%rbp)
+	jmp	.L967
+.L971:
+	movq	-8(%rbp), %rax
+	movl	(%rax), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	imull	%eax, %ecx
+	movl	%ecx, %edx
+	movl	-32(%rbp), %eax
+	addl	%edx, %eax
+	subl	$1, %eax
+	movl	-32(%rbp), %edi
+	movl	$0, %edx
+	divl	%edi
+	movl	%eax, %esi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18108, %rax
+	movl	%esi, (%rax)
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	imull	%eax, %ecx
+	movl	%ecx, %edx
+	movl	-28(%rbp), %eax
+	addl	%edx, %eax
+	subl	$1, %eax
+	movl	-28(%rbp), %edi
+	movl	$0, %edx
+	divl	%edi
+	movl	%eax, %esi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18112, %rax
+	movl	%esi, (%rax)
+	movq	-56(%rbp), %rax
+	movl	18064(%rax), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18084, %rax
+	movl	(%rax), %eax
+	imull	%ecx, %eax
+	leal	0(,%rax,8), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18116, %rax
+	movl	%ecx, (%rax)
+	movq	-56(%rbp), %rax
+	movl	18068(%rax), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18088, %rax
+	movl	(%rax), %eax
+	imull	%ecx, %eax
+	leal	0(,%rax,8), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18120, %rax
+	movl	%ecx, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18160, %rax
+	movq	$0, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	$0, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18152, %rax
+	movq	$0, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18120, %rax
+	movl	(%rax), %ecx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	movl	$15, %edx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__malloc_mad2
+	movq	%rax, %rcx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18136, %rax
+	movq	%rcx, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18136, %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	jne	.L968
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	movl	%eax, %edx
+	movl	-36(%rbp), %eax
+	leal	1(%rax), %ecx
+	movq	-56(%rbp), %rax
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__free_jpeg_components
+	jmp	.L939
+.L968:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18136, %rax
+	movq	(%rax), %rax
+	addq	$15, %rax
+	andq	$-16, %rax
+	movq	%rax, %rsi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18128, %rax
+	movq	%rsi, (%rax)
+	movq	-56(%rbp), %rax
+	movl	18480(%rax), %eax
+	testl	%eax, %eax
+	je	.L969
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	leal	7(%rax), %edx
+	testl	%eax, %eax
+	cmovs	%edx, %eax
+	sarl	$3, %eax
+	movl	%eax, %esi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18168, %rax
+	movl	%esi, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18120, %rax
+	movl	(%rax), %eax
+	leal	7(%rax), %edx
+	testl	%eax, %eax
+	cmovs	%edx, %eax
+	sarl	$3, %eax
+	movl	%eax, %esi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18172, %rax
+	movl	%esi, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18120, %rax
+	movl	(%rax), %esi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	movl	$15, %ecx
+	movl	$2, %edx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, %rcx
+	movq	-56(%rbp), %rsi
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18144, %rax
+	movq	%rcx, (%rax)
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	jne	.L970
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	movl	%eax, %edx
+	movl	-36(%rbp), %eax
+	leal	1(%rax), %ecx
+	movq	-56(%rbp), %rax
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__free_jpeg_components
+	jmp	.L939
+.L970:
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	(%rax), %rax
+	addq	$15, %rax
+	andq	$-16, %rax
+	movq	%rax, %rsi
+	movq	-56(%rbp), %rcx
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18160, %rax
+	movq	%rsi, (%rax)
+.L969:
+	addl	$1, -36(%rbp)
+.L967:
+	movq	-8(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -36(%rbp)
+	jl	.L971
+	movl	$1, %eax
+.L939:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4970:
+	.size	stbi__process_frame_header, .-stbi__process_frame_header
+	.section	.rodata
+.LC38:
+	.string	"no SOI"
+.LC39:
+	.string	"no SOF"
+	.text
+	.type	stbi__decode_jpeg_header, @function
+stbi__decode_jpeg_header:
+.LFB4971:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movq	-24(%rbp), %rax
+	movl	$0, 18504(%rax)
+	movq	-24(%rbp), %rax
+	movl	$-1, 18508(%rax)
+	movq	-24(%rbp), %rax
+	movb	$-1, 18472(%rax)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$216, -4(%rbp)
+	je	.L973
+	leaq	.LC38(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L974
+.L973:
+	cmpl	$1, -28(%rbp)
+	jne	.L975
+	movl	$1, %eax
+	jmp	.L974
+.L975:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	jmp	.L976
+.L982:
+	movl	-4(%rbp), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__process_marker
+	testl	%eax, %eax
+	jne	.L977
+	movl	$0, %eax
+	jmp	.L974
+.L977:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	jmp	.L978
+.L980:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L979
+	leaq	.LC39(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L974
+.L979:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+.L978:
+	cmpl	$255, -4(%rbp)
+	je	.L980
+.L976:
+	cmpl	$192, -4(%rbp)
+	je	.L981
+	cmpl	$193, -4(%rbp)
+	je	.L981
+	cmpl	$194, -4(%rbp)
+	jne	.L982
+.L981:
+	cmpl	$194, -4(%rbp)
+	sete	%al
+	movzbl	%al, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 18480(%rax)
+	movl	-28(%rbp), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__process_frame_header
+	testl	%eax, %eax
+	jne	.L983
+	movl	$0, %eax
+	jmp	.L974
+.L983:
+	movl	$1, %eax
+.L974:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4971:
+	.size	stbi__decode_jpeg_header, .-stbi__decode_jpeg_header
+	.type	stbi__skip_jpeg_junk_at_end, @function
+stbi__skip_jpeg_junk_at_end:
+.LFB4972:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	jmp	.L985
+.L990:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1(%rbp)
+	jmp	.L986
+.L989:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L987
+	movl	$-1, %eax
+	jmp	.L988
+.L987:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1(%rbp)
+	cmpb	$0, -1(%rbp)
+	je	.L986
+	cmpb	$-1, -1(%rbp)
+	je	.L986
+	movzbl	-1(%rbp), %eax
+	jmp	.L988
+.L986:
+	cmpb	$-1, -1(%rbp)
+	je	.L989
+.L985:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L990
+	movl	$-1, %eax
+.L988:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4972:
+	.size	stbi__skip_jpeg_junk_at_end, .-stbi__skip_jpeg_junk_at_end
+	.section	.rodata
+.LC40:
+	.string	"bad DNL len"
+.LC41:
+	.string	"bad DNL height"
+	.text
+	.type	stbi__decode_jpeg_image, @function
+stbi__decode_jpeg_image:
+.LFB4973:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	$0, -12(%rbp)
+	jmp	.L992
+.L993:
+	movq	-24(%rbp), %rcx
+	movl	-12(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18136, %rax
+	movq	$0, (%rax)
+	movq	-24(%rbp), %rcx
+	movl	-12(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18144, %rax
+	movq	$0, (%rax)
+	addl	$1, -12(%rbp)
+.L992:
+	cmpl	$3, -12(%rbp)
+	jle	.L993
+	movq	-24(%rbp), %rax
+	movl	$0, 18536(%rax)
+	movq	-24(%rbp), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	stbi__decode_jpeg_header
+	testl	%eax, %eax
+	jne	.L994
+	movl	$0, %eax
+	jmp	.L995
+.L994:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	jmp	.L996
+.L1005:
+	cmpl	$218, -12(%rbp)
+	jne	.L997
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__process_scan_header
+	testl	%eax, %eax
+	jne	.L998
+	movl	$0, %eax
+	jmp	.L995
+.L998:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__parse_entropy_coded_data
+	testl	%eax, %eax
+	jne	.L999
+	movl	$0, %eax
+	jmp	.L995
+.L999:
+	movq	-24(%rbp), %rax
+	movzbl	18472(%rax), %eax
+	cmpb	$-1, %al
+	jne	.L1000
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__skip_jpeg_junk_at_end
+	movq	-24(%rbp), %rdx
+	movb	%al, 18472(%rdx)
+.L1000:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	cmpl	$207, -12(%rbp)
+	jle	.L996
+	cmpl	$215, -12(%rbp)
+	jg	.L996
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	jmp	.L996
+.L997:
+	cmpl	$220, -12(%rbp)
+	jne	.L1001
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -8(%rbp)
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -4(%rbp)
+	cmpl	$4, -8(%rbp)
+	je	.L1002
+	leaq	.LC40(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L995
+.L1002:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, -4(%rbp)
+	je	.L1003
+	leaq	.LC41(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L995
+.L1003:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	jmp	.L996
+.L1001:
+	movl	-12(%rbp), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__process_marker
+	testl	%eax, %eax
+	jne	.L1004
+	movl	$1, %eax
+	jmp	.L995
+.L1004:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_marker
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+.L996:
+	cmpl	$217, -12(%rbp)
+	jne	.L1005
+	movq	-24(%rbp), %rax
+	movl	18480(%rax), %eax
+	testl	%eax, %eax
+	je	.L1006
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_finish
+.L1006:
+	movl	$1, %eax
+.L995:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4973:
+	.size	stbi__decode_jpeg_image, .-stbi__decode_jpeg_image
+	.type	resample_row_1, @function
+resample_row_1:
+.LFB4974:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movl	%ecx, -28(%rbp)
+	movl	%r8d, -32(%rbp)
+	movq	-16(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4974:
+	.size	resample_row_1, .-resample_row_1
+	.type	stbi__resample_row_v_2, @function
+stbi__resample_row_v_2:
+.LFB4975:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	%ecx, -44(%rbp)
+	movl	%r8d, -48(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L1010
+.L1011:
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	leal	(%rax,%rdx), %ecx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%ecx, %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %ecx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -4(%rbp)
+.L1010:
+	movl	-4(%rbp), %eax
+	cmpl	-44(%rbp), %eax
+	jl	.L1011
+	movq	-24(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4975:
+	.size	stbi__resample_row_v_2, .-stbi__resample_row_v_2
+	.type	stbi__resample_row_h_2, @function
+stbi__resample_row_h_2:
+.LFB4976:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	%ecx, -44(%rbp)
+	movl	%r8d, -48(%rbp)
+	movq	-32(%rbp), %rax
+	movq	%rax, -8(%rbp)
+	cmpl	$1, -44(%rbp)
+	jne	.L1014
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movq	-8(%rbp), %rdx
+	movzbl	(%rdx), %edx
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-24(%rbp), %rax
+	jmp	.L1015
+.L1014:
+	movq	-8(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-8(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movq	-8(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movb	%dl, (%rax)
+	movl	$1, -16(%rbp)
+	jmp	.L1016
+.L1017:
+	movl	-16(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%edx, %eax
+	addl	$2, %eax
+	movl	%eax, -12(%rbp)
+	movl	-16(%rbp), %eax
+	cltq
+	leaq	-1(%rax), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$2, %eax
+	movl	%eax, %ecx
+	movl	-16(%rbp), %eax
+	addl	%eax, %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movl	-16(%rbp), %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	sarl	$2, %eax
+	movl	%eax, %ecx
+	movl	-16(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -16(%rbp)
+.L1016:
+	movl	-44(%rbp), %eax
+	subl	$1, %eax
+	cmpl	%eax, -16(%rbp)
+	jl	.L1017
+	movl	-44(%rbp), %eax
+	cltq
+	leaq	-2(%rax), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-44(%rbp), %eax
+	cltq
+	leaq	-1(%rax), %rcx
+	movq	-8(%rbp), %rax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %ecx
+	movl	-16(%rbp), %eax
+	addl	%eax, %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movl	-44(%rbp), %eax
+	cltq
+	leaq	-1(%rax), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-16(%rbp), %edx
+	addl	%edx, %edx
+	movslq	%edx, %rdx
+	leaq	1(%rdx), %rcx
+	movq	-24(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-24(%rbp), %rax
+.L1015:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4976:
+	.size	stbi__resample_row_h_2, .-stbi__resample_row_h_2
+	.type	stbi__resample_row_hv_2, @function
+stbi__resample_row_hv_2:
+.LFB4977:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	%ecx, -44(%rbp)
+	movl	%r8d, -48(%rbp)
+	cmpl	$1, -44(%rbp)
+	jne	.L1019
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	addq	$1, %rax
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-24(%rbp), %rax
+	jmp	.L1020
+.L1019:
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	movl	%eax, -8(%rbp)
+	movl	-8(%rbp), %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	movl	$1, -12(%rbp)
+	jmp	.L1021
+.L1022:
+	movl	-8(%rbp), %eax
+	movl	%eax, -4(%rbp)
+	movl	-12(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	leal	(%rax,%rdx), %ecx
+	movl	-12(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%ecx, %eax
+	movl	%eax, -8(%rbp)
+	movl	-4(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-8(%rbp), %eax
+	addl	%edx, %eax
+	addl	$8, %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movl	-12(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	leaq	-1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movl	-8(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+	addl	$8, %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movl	-12(%rbp), %eax
+	addl	%eax, %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -12(%rbp)
+.L1021:
+	movl	-12(%rbp), %eax
+	cmpl	-44(%rbp), %eax
+	jl	.L1022
+	movl	-8(%rbp), %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %ecx
+	movl	-44(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	leaq	-1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movq	-24(%rbp), %rax
+.L1020:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4977:
+	.size	stbi__resample_row_hv_2, .-stbi__resample_row_hv_2
+	.type	stbi__resample_row_hv_2_simd, @function
+stbi__resample_row_hv_2_simd:
+.LFB4978:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$904, %rsp
+	movq	%rdi, -1000(%rbp)
+	movq	%rsi, -1008(%rbp)
+	movq	%rdx, -1016(%rbp)
+	movl	%ecx, -1020(%rbp)
+	movl	%r8d, -1024(%rbp)
+	movl	$0, -964(%rbp)
+	cmpl	$1, -1020(%rbp)
+	jne	.L1024
+	movq	-1008(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movq	-1016(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %edx
+	movq	-1000(%rbp), %rax
+	addq	$1, %rax
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-1000(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-1000(%rbp), %rax
+	jmp	.L1025
+.L1024:
+	movq	-1008(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movq	-1016(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	movl	%eax, -960(%rbp)
+	jmp	.L1026
+.L1052:
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -848(%rbp)
+	movl	-964(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1016(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -888(%rbp)
+	movq	-888(%rbp), %rax
+	movq	(%rax), %rax
+	movl	$0, %edx
+	movq	%rdx, -880(%rbp)
+	movq	%rax, -872(%rbp)
+	movq	-872(%rbp), %rax
+	movq	-880(%rbp), %rdx
+	movq	%rdx, -864(%rbp)
+	movq	%rax, -856(%rbp)
+	movq	-864(%rbp), %xmm1
+	movq	-856(%rbp), %xmm0
+	punpcklqdq	%xmm1, %xmm0
+	nop
+	nop
+	movaps	%xmm0, -832(%rbp)
+	movl	-964(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1008(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -928(%rbp)
+	movq	-928(%rbp), %rax
+	movq	(%rax), %rax
+	movl	$0, %edx
+	movq	%rdx, -920(%rbp)
+	movq	%rax, -912(%rbp)
+	movq	-912(%rbp), %rax
+	movq	-920(%rbp), %rdx
+	movq	%rdx, -904(%rbp)
+	movq	%rax, -896(%rbp)
+	movq	-904(%rbp), %xmm1
+	movq	-896(%rbp), %xmm0
+	punpcklqdq	%xmm1, %xmm0
+	nop
+	nop
+	movaps	%xmm0, -816(%rbp)
+	movdqa	-832(%rbp), %xmm0
+	movaps	%xmm0, -32(%rbp)
+	movdqa	-848(%rbp), %xmm0
+	movaps	%xmm0, -16(%rbp)
+	movdqa	-16(%rbp), %xmm1
+	movdqa	-32(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -800(%rbp)
+	movdqa	-816(%rbp), %xmm0
+	movaps	%xmm0, -64(%rbp)
+	movdqa	-848(%rbp), %xmm0
+	movaps	%xmm0, -48(%rbp)
+	movdqa	-48(%rbp), %xmm1
+	movdqa	-64(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -784(%rbp)
+	movdqa	-800(%rbp), %xmm0
+	movaps	%xmm0, -96(%rbp)
+	movdqa	-784(%rbp), %xmm0
+	movaps	%xmm0, -80(%rbp)
+	movdqa	-96(%rbp), %xmm0
+	movdqa	-80(%rbp), %xmm1
+	psubw	%xmm1, %xmm0
+	movaps	%xmm0, -768(%rbp)
+	movdqa	-784(%rbp), %xmm0
+	movaps	%xmm0, -112(%rbp)
+	movl	$2, -940(%rbp)
+	movdqa	-112(%rbp), %xmm1
+	movd	-940(%rbp), %xmm0
+	psllw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -752(%rbp)
+	movdqa	-752(%rbp), %xmm0
+	movaps	%xmm0, -144(%rbp)
+	movdqa	-768(%rbp), %xmm0
+	movaps	%xmm0, -128(%rbp)
+	movdqa	-144(%rbp), %xmm1
+	movdqa	-128(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -736(%rbp)
+	movdqa	-736(%rbp), %xmm0
+	pslldq	$2, %xmm0
+	movaps	%xmm0, -720(%rbp)
+	movdqa	-736(%rbp), %xmm0
+	psrldq	$2, %xmm0
+	movaps	%xmm0, -704(%rbp)
+	movl	-960(%rbp), %eax
+	cwtl
+	movdqa	-720(%rbp), %xmm0
+	pinsrw	$0, %eax, %xmm0
+	movaps	%xmm0, -688(%rbp)
+	movl	-964(%rbp), %eax
+	cltq
+	leaq	8(%rax), %rdx
+	movq	-1008(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	leal	(%rax,%rdx), %ecx
+	movl	-964(%rbp), %eax
+	cltq
+	leaq	8(%rax), %rdx
+	movq	-1016(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%ecx, %eax
+	cwtl
+	movdqa	-704(%rbp), %xmm0
+	pinsrw	$7, %eax, %xmm0
+	movaps	%xmm0, -672(%rbp)
+	movw	$8, -982(%rbp)
+	movswl	-982(%rbp), %eax
+	movswl	-982(%rbp), %edx
+	movswl	-982(%rbp), %ecx
+	movswl	-982(%rbp), %esi
+	movswl	-982(%rbp), %edi
+	movswl	-982(%rbp), %r8d
+	movswl	-982(%rbp), %r9d
+	movswl	-982(%rbp), %r10d
+	movw	%r10w, -980(%rbp)
+	movw	%r9w, -978(%rbp)
+	movw	%r8w, -976(%rbp)
+	movw	%di, -974(%rbp)
+	movw	%si, -972(%rbp)
+	movw	%cx, -970(%rbp)
+	movw	%dx, -968(%rbp)
+	movw	%ax, -966(%rbp)
+	movzwl	-966(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-968(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-970(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-972(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-974(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-976(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-978(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-980(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -656(%rbp)
+	movdqa	-736(%rbp), %xmm0
+	movaps	%xmm0, -160(%rbp)
+	movl	$2, -944(%rbp)
+	movdqa	-160(%rbp), %xmm1
+	movd	-944(%rbp), %xmm0
+	psllw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -640(%rbp)
+	movdqa	-688(%rbp), %xmm0
+	movaps	%xmm0, -192(%rbp)
+	movdqa	-736(%rbp), %xmm0
+	movaps	%xmm0, -176(%rbp)
+	movdqa	-192(%rbp), %xmm0
+	movdqa	-176(%rbp), %xmm1
+	psubw	%xmm1, %xmm0
+	movaps	%xmm0, -624(%rbp)
+	movdqa	-672(%rbp), %xmm0
+	movaps	%xmm0, -224(%rbp)
+	movdqa	-736(%rbp), %xmm0
+	movaps	%xmm0, -208(%rbp)
+	movdqa	-224(%rbp), %xmm0
+	movdqa	-208(%rbp), %xmm1
+	psubw	%xmm1, %xmm0
+	movaps	%xmm0, -608(%rbp)
+	movdqa	-640(%rbp), %xmm0
+	movaps	%xmm0, -256(%rbp)
+	movdqa	-656(%rbp), %xmm0
+	movaps	%xmm0, -240(%rbp)
+	movdqa	-256(%rbp), %xmm1
+	movdqa	-240(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -592(%rbp)
+	movdqa	-624(%rbp), %xmm0
+	movaps	%xmm0, -288(%rbp)
+	movdqa	-592(%rbp), %xmm0
+	movaps	%xmm0, -272(%rbp)
+	movdqa	-288(%rbp), %xmm1
+	movdqa	-272(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -576(%rbp)
+	movdqa	-608(%rbp), %xmm0
+	movaps	%xmm0, -320(%rbp)
+	movdqa	-592(%rbp), %xmm0
+	movaps	%xmm0, -304(%rbp)
+	movdqa	-320(%rbp), %xmm1
+	movdqa	-304(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -560(%rbp)
+	movdqa	-576(%rbp), %xmm0
+	movaps	%xmm0, -352(%rbp)
+	movdqa	-560(%rbp), %xmm0
+	movaps	%xmm0, -336(%rbp)
+	movdqa	-336(%rbp), %xmm1
+	movdqa	-352(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -544(%rbp)
+	movdqa	-576(%rbp), %xmm0
+	movaps	%xmm0, -384(%rbp)
+	movdqa	-560(%rbp), %xmm0
+	movaps	%xmm0, -368(%rbp)
+	movdqa	-368(%rbp), %xmm1
+	movdqa	-384(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -528(%rbp)
+	movdqa	-544(%rbp), %xmm0
+	movaps	%xmm0, -400(%rbp)
+	movl	$4, -948(%rbp)
+	movdqa	-400(%rbp), %xmm1
+	movd	-948(%rbp), %xmm0
+	psrlw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -512(%rbp)
+	movdqa	-528(%rbp), %xmm0
+	movaps	%xmm0, -416(%rbp)
+	movl	$4, -952(%rbp)
+	movdqa	-416(%rbp), %xmm1
+	movd	-952(%rbp), %xmm0
+	psrlw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -496(%rbp)
+	movdqa	-512(%rbp), %xmm0
+	movaps	%xmm0, -448(%rbp)
+	movdqa	-496(%rbp), %xmm0
+	movaps	%xmm0, -432(%rbp)
+	movdqa	-432(%rbp), %xmm1
+	movdqa	-448(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -480(%rbp)
+	movl	-964(%rbp), %eax
+	addl	%eax, %eax
+	movslq	%eax, %rdx
+	movq	-1000(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -936(%rbp)
+	movdqa	-480(%rbp), %xmm0
+	movaps	%xmm0, -464(%rbp)
+	movdqa	-464(%rbp), %xmm0
+	movq	-936(%rbp), %rax
+	movups	%xmm0, (%rax)
+	nop
+	movl	-964(%rbp), %eax
+	cltq
+	leaq	7(%rax), %rdx
+	movq	-1008(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-964(%rbp), %eax
+	cltq
+	leaq	7(%rax), %rcx
+	movq	-1016(%rbp), %rax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	movl	%eax, -960(%rbp)
+	addl	$8, -964(%rbp)
+.L1026:
+	movl	-1020(%rbp), %eax
+	subl	$1, %eax
+	andl	$-8, %eax
+	cmpl	%eax, -964(%rbp)
+	jl	.L1052
+	movl	-960(%rbp), %eax
+	movl	%eax, -956(%rbp)
+	movl	-964(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1008(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	leal	(%rax,%rdx), %ecx
+	movl	-964(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1016(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%ecx, %eax
+	movl	%eax, -960(%rbp)
+	movl	-960(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-956(%rbp), %eax
+	addl	%edx, %eax
+	addl	$8, %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movl	-964(%rbp), %eax
+	addl	%eax, %eax
+	movslq	%eax, %rdx
+	movq	-1000(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -964(%rbp)
+	jmp	.L1053
+.L1054:
+	movl	-960(%rbp), %eax
+	movl	%eax, -956(%rbp)
+	movl	-964(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1008(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	leal	(%rax,%rdx), %ecx
+	movl	-964(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1016(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%ecx, %eax
+	movl	%eax, -960(%rbp)
+	movl	-956(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-960(%rbp), %eax
+	addl	%edx, %eax
+	addl	$8, %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movl	-964(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	leaq	-1(%rax), %rdx
+	movq	-1000(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movl	-960(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-956(%rbp), %eax
+	addl	%edx, %eax
+	addl	$8, %eax
+	sarl	$4, %eax
+	movl	%eax, %ecx
+	movl	-964(%rbp), %eax
+	addl	%eax, %eax
+	movslq	%eax, %rdx
+	movq	-1000(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	addl	$1, -964(%rbp)
+.L1053:
+	movl	-964(%rbp), %eax
+	cmpl	-1020(%rbp), %eax
+	jl	.L1054
+	movl	-960(%rbp), %eax
+	addl	$2, %eax
+	sarl	$2, %eax
+	movl	%eax, %ecx
+	movl	-1020(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	leaq	-1(%rax), %rdx
+	movq	-1000(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movq	-1000(%rbp), %rax
+.L1025:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4978:
+	.size	stbi__resample_row_hv_2_simd, .-stbi__resample_row_hv_2_simd
+	.type	stbi__resample_row_generic, @function
+stbi__resample_row_generic:
+.LFB4979:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	%ecx, -44(%rbp)
+	movl	%r8d, -48(%rbp)
+	movl	$0, -8(%rbp)
+	jmp	.L1056
+.L1059:
+	movl	$0, -4(%rbp)
+	jmp	.L1057
+.L1058:
+	movl	-8(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	leaq	(%rdx,%rax), %rcx
+	movl	-8(%rbp), %eax
+	imull	-48(%rbp), %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-24(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	(%rcx), %eax
+	movb	%al, (%rdx)
+	addl	$1, -4(%rbp)
+.L1057:
+	movl	-4(%rbp), %eax
+	cmpl	-48(%rbp), %eax
+	jl	.L1058
+	addl	$1, -8(%rbp)
+.L1056:
+	movl	-8(%rbp), %eax
+	cmpl	-44(%rbp), %eax
+	jl	.L1059
+	movq	-24(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4979:
+	.size	stbi__resample_row_generic, .-stbi__resample_row_generic
+	.type	stbi__YCbCr_to_RGB_row, @function
+stbi__YCbCr_to_RGB_row:
+.LFB4980:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movq	%rdx, -56(%rbp)
+	movq	%rcx, -64(%rbp)
+	movl	%r8d, -68(%rbp)
+	movl	%r9d, -72(%rbp)
+	movl	$0, -28(%rbp)
+	jmp	.L1062
+.L1069:
+	movl	-28(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-48(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$20, %eax
+	addl	$524288, %eax
+	movl	%eax, -12(%rbp)
+	movl	-28(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	$-128, %eax
+	movl	%eax, -8(%rbp)
+	movl	-28(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	$-128, %eax
+	movl	%eax, -4(%rbp)
+	movl	-8(%rbp), %eax
+	imull	$1470208, %eax, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -24(%rbp)
+	movl	-8(%rbp), %eax
+	imull	$-748800, %eax, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	imull	$-360960, %eax, %eax
+	movw	$0, %ax
+	addl	%edx, %eax
+	movl	%eax, -20(%rbp)
+	movl	-4(%rbp), %eax
+	imull	$1858048, %eax, %edx
+	movl	-12(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -16(%rbp)
+	sarl	$20, -24(%rbp)
+	sarl	$20, -20(%rbp)
+	sarl	$20, -16(%rbp)
+	movl	-24(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L1063
+	cmpl	$0, -24(%rbp)
+	jns	.L1064
+	movl	$0, -24(%rbp)
+	jmp	.L1063
+.L1064:
+	movl	$255, -24(%rbp)
+.L1063:
+	movl	-20(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L1065
+	cmpl	$0, -20(%rbp)
+	jns	.L1066
+	movl	$0, -20(%rbp)
+	jmp	.L1065
+.L1066:
+	movl	$255, -20(%rbp)
+.L1065:
+	movl	-16(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L1067
+	cmpl	$0, -16(%rbp)
+	jns	.L1068
+	movl	$0, -16(%rbp)
+	jmp	.L1067
+.L1068:
+	movl	$255, -16(%rbp)
+.L1067:
+	movl	-24(%rbp), %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-40(%rbp), %rax
+	addq	$1, %rax
+	movl	-20(%rbp), %edx
+	movb	%dl, (%rax)
+	movq	-40(%rbp), %rax
+	addq	$2, %rax
+	movl	-16(%rbp), %edx
+	movb	%dl, (%rax)
+	movq	-40(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	movl	-72(%rbp), %eax
+	cltq
+	addq	%rax, -40(%rbp)
+	addl	$1, -28(%rbp)
+.L1062:
+	movl	-28(%rbp), %eax
+	cmpl	-68(%rbp), %eax
+	jl	.L1069
+	nop
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4980:
+	.size	stbi__YCbCr_to_RGB_row, .-stbi__YCbCr_to_RGB_row
+	.type	stbi__YCbCr_to_RGB_simd, @function
+stbi__YCbCr_to_RGB_simd:
+.LFB4981:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%r15
+	pushq	%r14
+	pushq	%r13
+	pushq	%r12
+	pushq	%rbx
+	subq	$1488, %rsp
+	.cfi_offset 15, -24
+	.cfi_offset 14, -32
+	.cfi_offset 13, -40
+	.cfi_offset 12, -48
+	.cfi_offset 3, -56
+	movq	%rdi, -1592(%rbp)
+	movq	%rsi, -1600(%rbp)
+	movq	%rdx, -1608(%rbp)
+	movq	%rcx, -1616(%rbp)
+	movl	%r8d, -1620(%rbp)
+	movl	%r9d, -1624(%rbp)
+	movl	$0, -1460(%rbp)
+	cmpl	$4, -1624(%rbp)
+	jne	.L1122
+	movb	$-128, -1567(%rbp)
+	movsbl	-1567(%rbp), %edx
+	movsbl	-1567(%rbp), %ecx
+	movsbl	-1567(%rbp), %esi
+	movsbl	-1567(%rbp), %edi
+	movsbl	-1567(%rbp), %r8d
+	movsbl	-1567(%rbp), %r9d
+	movsbl	-1567(%rbp), %r10d
+	movsbl	-1567(%rbp), %r11d
+	movsbl	-1567(%rbp), %ebx
+	movsbl	-1567(%rbp), %r12d
+	movsbl	-1567(%rbp), %r13d
+	movsbl	-1567(%rbp), %r14d
+	movsbl	-1567(%rbp), %r15d
+	movsbl	-1567(%rbp), %eax
+	movl	%eax, -1648(%rbp)
+	movsbl	-1567(%rbp), %eax
+	movl	%eax, -1628(%rbp)
+	movsbl	-1567(%rbp), %eax
+	movb	%al, -1566(%rbp)
+	movzbl	-1628(%rbp), %eax
+	movb	%al, -1565(%rbp)
+	movzbl	-1648(%rbp), %eax
+	movb	%al, -1564(%rbp)
+	movb	%r15b, -1563(%rbp)
+	movb	%r14b, -1562(%rbp)
+	movb	%r13b, -1561(%rbp)
+	movb	%r12b, -1560(%rbp)
+	movb	%bl, -1559(%rbp)
+	movb	%r11b, -1558(%rbp)
+	movb	%r10b, -1557(%rbp)
+	movb	%r9b, -1556(%rbp)
+	movb	%r8b, -1555(%rbp)
+	movb	%dil, -1554(%rbp)
+	movb	%sil, -1553(%rbp)
+	movb	%cl, -1552(%rbp)
+	movb	%dl, -1551(%rbp)
+	movzbl	-1558(%rbp), %edx
+	movzbl	-1557(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1556(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1555(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1554(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1553(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1552(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1551(%rbp), %eax
+	salq	$8, %rdx
+	movq	%rdx, %rcx
+	orq	%rax, %rcx
+	movzbl	-1566(%rbp), %edx
+	movzbl	-1565(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1564(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1563(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1562(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1561(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1560(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1559(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rdx, %rax
+	movq	%rcx, -1648(%rbp)
+	movq	%rax, -1640(%rbp)
+	movdqa	-1648(%rbp), %xmm0
+	nop
+	movaps	%xmm0, -1280(%rbp)
+	movw	$5743, -1478(%rbp)
+	movswl	-1478(%rbp), %eax
+	movswl	-1478(%rbp), %edx
+	movswl	-1478(%rbp), %ecx
+	movswl	-1478(%rbp), %esi
+	movswl	-1478(%rbp), %edi
+	movswl	-1478(%rbp), %r8d
+	movswl	-1478(%rbp), %r9d
+	movswl	-1478(%rbp), %r10d
+	movw	%r10w, -1476(%rbp)
+	movw	%r9w, -1474(%rbp)
+	movw	%r8w, -1472(%rbp)
+	movw	%di, -1470(%rbp)
+	movw	%si, -1468(%rbp)
+	movw	%cx, -1466(%rbp)
+	movw	%dx, -1464(%rbp)
+	movw	%ax, -1462(%rbp)
+	movzwl	-1462(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1464(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-1466(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1468(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-1470(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1472(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-1474(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1476(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -1264(%rbp)
+	movw	$-2925, -1496(%rbp)
+	movswl	-1496(%rbp), %eax
+	movswl	-1496(%rbp), %edx
+	movswl	-1496(%rbp), %ecx
+	movswl	-1496(%rbp), %esi
+	movswl	-1496(%rbp), %edi
+	movswl	-1496(%rbp), %r8d
+	movswl	-1496(%rbp), %r9d
+	movswl	-1496(%rbp), %r10d
+	movw	%r10w, -1494(%rbp)
+	movw	%r9w, -1492(%rbp)
+	movw	%r8w, -1490(%rbp)
+	movw	%di, -1488(%rbp)
+	movw	%si, -1486(%rbp)
+	movw	%cx, -1484(%rbp)
+	movw	%dx, -1482(%rbp)
+	movw	%ax, -1480(%rbp)
+	movzwl	-1480(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1482(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-1484(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1486(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-1488(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1490(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-1492(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1494(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -1248(%rbp)
+	movw	$-1410, -1514(%rbp)
+	movswl	-1514(%rbp), %eax
+	movswl	-1514(%rbp), %edx
+	movswl	-1514(%rbp), %ecx
+	movswl	-1514(%rbp), %esi
+	movswl	-1514(%rbp), %edi
+	movswl	-1514(%rbp), %r8d
+	movswl	-1514(%rbp), %r9d
+	movswl	-1514(%rbp), %r10d
+	movw	%r10w, -1512(%rbp)
+	movw	%r9w, -1510(%rbp)
+	movw	%r8w, -1508(%rbp)
+	movw	%di, -1506(%rbp)
+	movw	%si, -1504(%rbp)
+	movw	%cx, -1502(%rbp)
+	movw	%dx, -1500(%rbp)
+	movw	%ax, -1498(%rbp)
+	movzwl	-1498(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1500(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-1502(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1504(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-1506(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1508(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-1510(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1512(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -1232(%rbp)
+	movw	$7258, -1532(%rbp)
+	movswl	-1532(%rbp), %eax
+	movswl	-1532(%rbp), %edx
+	movswl	-1532(%rbp), %ecx
+	movswl	-1532(%rbp), %esi
+	movswl	-1532(%rbp), %edi
+	movswl	-1532(%rbp), %r8d
+	movswl	-1532(%rbp), %r9d
+	movswl	-1532(%rbp), %r10d
+	movw	%r10w, -1530(%rbp)
+	movw	%r9w, -1528(%rbp)
+	movw	%r8w, -1526(%rbp)
+	movw	%di, -1524(%rbp)
+	movw	%si, -1522(%rbp)
+	movw	%cx, -1520(%rbp)
+	movw	%dx, -1518(%rbp)
+	movw	%ax, -1516(%rbp)
+	movzwl	-1516(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1518(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-1520(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1522(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-1524(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1526(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-1528(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1530(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -1216(%rbp)
+	movb	$-128, -1584(%rbp)
+	movsbl	-1584(%rbp), %edx
+	movsbl	-1584(%rbp), %ecx
+	movsbl	-1584(%rbp), %esi
+	movsbl	-1584(%rbp), %edi
+	movsbl	-1584(%rbp), %r8d
+	movsbl	-1584(%rbp), %r9d
+	movsbl	-1584(%rbp), %r10d
+	movsbl	-1584(%rbp), %r11d
+	movsbl	-1584(%rbp), %ebx
+	movsbl	-1584(%rbp), %r12d
+	movsbl	-1584(%rbp), %r13d
+	movsbl	-1584(%rbp), %r14d
+	movsbl	-1584(%rbp), %r15d
+	movsbl	-1584(%rbp), %eax
+	movl	%eax, -1648(%rbp)
+	movsbl	-1584(%rbp), %eax
+	movl	%eax, -1628(%rbp)
+	movsbl	-1584(%rbp), %eax
+	movb	%al, -1583(%rbp)
+	movzbl	-1628(%rbp), %eax
+	movb	%al, -1582(%rbp)
+	movzbl	-1648(%rbp), %eax
+	movb	%al, -1581(%rbp)
+	movb	%r15b, -1580(%rbp)
+	movb	%r14b, -1579(%rbp)
+	movb	%r13b, -1578(%rbp)
+	movb	%r12b, -1577(%rbp)
+	movb	%bl, -1576(%rbp)
+	movb	%r11b, -1575(%rbp)
+	movb	%r10b, -1574(%rbp)
+	movb	%r9b, -1573(%rbp)
+	movb	%r8b, -1572(%rbp)
+	movb	%dil, -1571(%rbp)
+	movb	%sil, -1570(%rbp)
+	movb	%cl, -1569(%rbp)
+	movb	%dl, -1568(%rbp)
+	movzbl	-1575(%rbp), %edx
+	movzbl	-1574(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1573(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1572(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1571(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1570(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1569(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1568(%rbp), %eax
+	salq	$8, %rdx
+	movq	%rdx, %rcx
+	orq	%rax, %rcx
+	movzbl	-1583(%rbp), %edx
+	movzbl	-1582(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1581(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1580(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1579(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1578(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1577(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rax, %rdx
+	movzbl	-1576(%rbp), %eax
+	salq	$8, %rdx
+	orq	%rdx, %rax
+	movq	%rcx, -1648(%rbp)
+	movq	%rax, -1640(%rbp)
+	movdqa	-1648(%rbp), %xmm0
+	nop
+	movaps	%xmm0, -1200(%rbp)
+	movw	$255, -1550(%rbp)
+	movswl	-1550(%rbp), %eax
+	movswl	-1550(%rbp), %edx
+	movswl	-1550(%rbp), %ecx
+	movswl	-1550(%rbp), %esi
+	movswl	-1550(%rbp), %edi
+	movswl	-1550(%rbp), %r8d
+	movswl	-1550(%rbp), %r9d
+	movswl	-1550(%rbp), %r10d
+	movw	%r10w, -1548(%rbp)
+	movw	%r9w, -1546(%rbp)
+	movw	%r8w, -1544(%rbp)
+	movw	%di, -1542(%rbp)
+	movw	%si, -1540(%rbp)
+	movw	%cx, -1538(%rbp)
+	movw	%dx, -1536(%rbp)
+	movw	%ax, -1534(%rbp)
+	movzwl	-1534(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1536(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm1
+	movzwl	-1538(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1540(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm4
+	movzwl	-1542(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1544(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm2
+	movzwl	-1546(%rbp), %eax
+	movd	%eax, %xmm0
+	movzwl	-1548(%rbp), %eax
+	pinsrw	$1, %eax, %xmm0
+	movdqa	%xmm0, %xmm3
+	punpckldq	%xmm4, %xmm1
+	movdqa	%xmm1, %xmm0
+	movdqa	%xmm0, %xmm1
+	punpckldq	%xmm3, %xmm2
+	movdqa	%xmm2, %xmm0
+	movdqa	%xmm0, %xmm2
+	punpcklqdq	%xmm2, %xmm1
+	movdqa	%xmm1, %xmm0
+	nop
+	movaps	%xmm0, -1184(%rbp)
+	jmp	.L1086
+.L1121:
+	movl	-1460(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1600(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -1320(%rbp)
+	movq	-1320(%rbp), %rax
+	movq	(%rax), %rax
+	movl	$0, %edx
+	movq	%rdx, -1312(%rbp)
+	movq	%rax, -1304(%rbp)
+	movq	-1304(%rbp), %rax
+	movq	-1312(%rbp), %rdx
+	movq	%rdx, -1296(%rbp)
+	movq	%rax, -1288(%rbp)
+	movq	-1296(%rbp), %xmm1
+	movq	-1288(%rbp), %xmm0
+	punpcklqdq	%xmm1, %xmm0
+	nop
+	nop
+	movaps	%xmm0, -1168(%rbp)
+	movl	-1460(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1616(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -1360(%rbp)
+	movq	-1360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	$0, %edx
+	movq	%rdx, -1352(%rbp)
+	movq	%rax, -1344(%rbp)
+	movq	-1344(%rbp), %rax
+	movq	-1352(%rbp), %rdx
+	movq	%rdx, -1336(%rbp)
+	movq	%rax, -1328(%rbp)
+	movq	-1336(%rbp), %xmm1
+	movq	-1328(%rbp), %xmm0
+	punpcklqdq	%xmm1, %xmm0
+	nop
+	nop
+	movaps	%xmm0, -1152(%rbp)
+	movl	-1460(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1608(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -1400(%rbp)
+	movq	-1400(%rbp), %rax
+	movq	(%rax), %rax
+	movl	$0, %edx
+	movq	%rdx, -1392(%rbp)
+	movq	%rax, -1384(%rbp)
+	movq	-1384(%rbp), %rax
+	movq	-1392(%rbp), %rdx
+	movq	%rdx, -1376(%rbp)
+	movq	%rax, -1368(%rbp)
+	movq	-1376(%rbp), %xmm1
+	movq	-1368(%rbp), %xmm0
+	punpcklqdq	%xmm1, %xmm0
+	nop
+	nop
+	movaps	%xmm0, -1136(%rbp)
+	movdqa	-1152(%rbp), %xmm0
+	movaps	%xmm0, -80(%rbp)
+	movdqa	-1280(%rbp), %xmm0
+	movaps	%xmm0, -64(%rbp)
+	movdqa	-80(%rbp), %xmm1
+	movdqa	-64(%rbp), %xmm0
+	pxor	%xmm1, %xmm0
+	movaps	%xmm0, -1120(%rbp)
+	movdqa	-1136(%rbp), %xmm0
+	movaps	%xmm0, -112(%rbp)
+	movdqa	-1280(%rbp), %xmm0
+	movaps	%xmm0, -96(%rbp)
+	movdqa	-112(%rbp), %xmm1
+	movdqa	-96(%rbp), %xmm0
+	pxor	%xmm1, %xmm0
+	movaps	%xmm0, -1104(%rbp)
+	movdqa	-1200(%rbp), %xmm0
+	movaps	%xmm0, -144(%rbp)
+	movdqa	-1168(%rbp), %xmm0
+	movaps	%xmm0, -128(%rbp)
+	movdqa	-128(%rbp), %xmm1
+	movdqa	-144(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -1088(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -176(%rbp)
+	movdqa	-1120(%rbp), %xmm0
+	movaps	%xmm0, -160(%rbp)
+	movdqa	-160(%rbp), %xmm1
+	movdqa	-176(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -1072(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -208(%rbp)
+	movdqa	-1104(%rbp), %xmm0
+	movaps	%xmm0, -192(%rbp)
+	movdqa	-192(%rbp), %xmm1
+	movdqa	-208(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -1056(%rbp)
+	movdqa	-1088(%rbp), %xmm0
+	movaps	%xmm0, -224(%rbp)
+	movl	$4, -1420(%rbp)
+	movdqa	-224(%rbp), %xmm1
+	movd	-1420(%rbp), %xmm0
+	psrlw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -1040(%rbp)
+	movdqa	-1264(%rbp), %xmm0
+	movaps	%xmm0, -256(%rbp)
+	movdqa	-1072(%rbp), %xmm0
+	movaps	%xmm0, -240(%rbp)
+	movdqa	-240(%rbp), %xmm0
+	movdqa	-256(%rbp), %xmm1
+	pmulhw	%xmm1, %xmm0
+	movaps	%xmm0, -1024(%rbp)
+	movdqa	-1232(%rbp), %xmm0
+	movaps	%xmm0, -288(%rbp)
+	movdqa	-1056(%rbp), %xmm0
+	movaps	%xmm0, -272(%rbp)
+	movdqa	-272(%rbp), %xmm0
+	movdqa	-288(%rbp), %xmm1
+	pmulhw	%xmm1, %xmm0
+	movaps	%xmm0, -1008(%rbp)
+	movdqa	-1056(%rbp), %xmm0
+	movaps	%xmm0, -320(%rbp)
+	movdqa	-1216(%rbp), %xmm0
+	movaps	%xmm0, -304(%rbp)
+	movdqa	-304(%rbp), %xmm0
+	movdqa	-320(%rbp), %xmm1
+	pmulhw	%xmm1, %xmm0
+	movaps	%xmm0, -992(%rbp)
+	movdqa	-1072(%rbp), %xmm0
+	movaps	%xmm0, -352(%rbp)
+	movdqa	-1248(%rbp), %xmm0
+	movaps	%xmm0, -336(%rbp)
+	movdqa	-336(%rbp), %xmm0
+	movdqa	-352(%rbp), %xmm1
+	pmulhw	%xmm1, %xmm0
+	movaps	%xmm0, -976(%rbp)
+	movdqa	-1024(%rbp), %xmm0
+	movaps	%xmm0, -384(%rbp)
+	movdqa	-1040(%rbp), %xmm0
+	movaps	%xmm0, -368(%rbp)
+	movdqa	-384(%rbp), %xmm1
+	movdqa	-368(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -960(%rbp)
+	movdqa	-1008(%rbp), %xmm0
+	movaps	%xmm0, -416(%rbp)
+	movdqa	-1040(%rbp), %xmm0
+	movaps	%xmm0, -400(%rbp)
+	movdqa	-416(%rbp), %xmm1
+	movdqa	-400(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -944(%rbp)
+	movdqa	-1040(%rbp), %xmm0
+	movaps	%xmm0, -448(%rbp)
+	movdqa	-992(%rbp), %xmm0
+	movaps	%xmm0, -432(%rbp)
+	movdqa	-448(%rbp), %xmm1
+	movdqa	-432(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -928(%rbp)
+	movdqa	-944(%rbp), %xmm0
+	movaps	%xmm0, -480(%rbp)
+	movdqa	-976(%rbp), %xmm0
+	movaps	%xmm0, -464(%rbp)
+	movdqa	-480(%rbp), %xmm1
+	movdqa	-464(%rbp), %xmm0
+	paddw	%xmm1, %xmm0
+	movaps	%xmm0, -912(%rbp)
+	movdqa	-960(%rbp), %xmm0
+	movaps	%xmm0, -496(%rbp)
+	movl	$4, -1424(%rbp)
+	movdqa	-496(%rbp), %xmm1
+	movd	-1424(%rbp), %xmm0
+	psraw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -896(%rbp)
+	movdqa	-928(%rbp), %xmm0
+	movaps	%xmm0, -512(%rbp)
+	movl	$4, -1428(%rbp)
+	movdqa	-512(%rbp), %xmm1
+	movd	-1428(%rbp), %xmm0
+	psraw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -880(%rbp)
+	movdqa	-912(%rbp), %xmm0
+	movaps	%xmm0, -528(%rbp)
+	movl	$4, -1432(%rbp)
+	movdqa	-528(%rbp), %xmm1
+	movd	-1432(%rbp), %xmm0
+	psraw	%xmm0, %xmm1
+	movdqa	%xmm1, %xmm0
+	movaps	%xmm0, -864(%rbp)
+	movdqa	-896(%rbp), %xmm0
+	movaps	%xmm0, -560(%rbp)
+	movdqa	-880(%rbp), %xmm0
+	movaps	%xmm0, -544(%rbp)
+	movdqa	-544(%rbp), %xmm1
+	movdqa	-560(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -848(%rbp)
+	movdqa	-864(%rbp), %xmm0
+	movaps	%xmm0, -592(%rbp)
+	movdqa	-1184(%rbp), %xmm0
+	movaps	%xmm0, -576(%rbp)
+	movdqa	-576(%rbp), %xmm1
+	movdqa	-592(%rbp), %xmm0
+	packuswb	%xmm1, %xmm0
+	movaps	%xmm0, -832(%rbp)
+	movdqa	-848(%rbp), %xmm0
+	movaps	%xmm0, -624(%rbp)
+	movdqa	-832(%rbp), %xmm0
+	movaps	%xmm0, -608(%rbp)
+	movdqa	-608(%rbp), %xmm1
+	movdqa	-624(%rbp), %xmm0
+	punpcklbw	%xmm1, %xmm0
+	movaps	%xmm0, -816(%rbp)
+	movdqa	-848(%rbp), %xmm0
+	movaps	%xmm0, -656(%rbp)
+	movdqa	-832(%rbp), %xmm0
+	movaps	%xmm0, -640(%rbp)
+	movdqa	-640(%rbp), %xmm1
+	movdqa	-656(%rbp), %xmm0
+	punpckhbw	%xmm1, %xmm0
+	movaps	%xmm0, -800(%rbp)
+	movdqa	-816(%rbp), %xmm0
+	movaps	%xmm0, -688(%rbp)
+	movdqa	-800(%rbp), %xmm0
+	movaps	%xmm0, -672(%rbp)
+	movdqa	-672(%rbp), %xmm1
+	movdqa	-688(%rbp), %xmm0
+	punpcklwd	%xmm1, %xmm0
+	movaps	%xmm0, -784(%rbp)
+	movdqa	-816(%rbp), %xmm0
+	movaps	%xmm0, -720(%rbp)
+	movdqa	-800(%rbp), %xmm0
+	movaps	%xmm0, -704(%rbp)
+	movdqa	-704(%rbp), %xmm1
+	movdqa	-720(%rbp), %xmm0
+	punpckhwd	%xmm1, %xmm0
+	movaps	%xmm0, -768(%rbp)
+	movq	-1592(%rbp), %rax
+	movq	%rax, -1408(%rbp)
+	movdqa	-784(%rbp), %xmm0
+	movaps	%xmm0, -736(%rbp)
+	movdqa	-736(%rbp), %xmm0
+	movq	-1408(%rbp), %rax
+	movups	%xmm0, (%rax)
+	nop
+	movq	-1592(%rbp), %rax
+	addq	$16, %rax
+	movq	%rax, -1416(%rbp)
+	movdqa	-768(%rbp), %xmm0
+	movaps	%xmm0, -752(%rbp)
+	movdqa	-752(%rbp), %xmm0
+	movq	-1416(%rbp), %rax
+	movups	%xmm0, (%rax)
+	nop
+	addq	$32, -1592(%rbp)
+	addl	$8, -1460(%rbp)
+.L1086:
+	movl	-1460(%rbp), %eax
+	addl	$7, %eax
+	cmpl	%eax, -1620(%rbp)
+	jg	.L1121
+	jmp	.L1122
+.L1129:
+	movl	-1460(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1600(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$20, %eax
+	addl	$524288, %eax
+	movl	%eax, -1444(%rbp)
+	movl	-1460(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1616(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	$-128, %eax
+	movl	%eax, -1440(%rbp)
+	movl	-1460(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1608(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	$-128, %eax
+	movl	%eax, -1436(%rbp)
+	movl	-1440(%rbp), %eax
+	imull	$1470208, %eax, %edx
+	movl	-1444(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -1456(%rbp)
+	movl	-1440(%rbp), %eax
+	imull	$-748800, %eax, %edx
+	movl	-1444(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, %edx
+	movl	-1436(%rbp), %eax
+	imull	$-360960, %eax, %eax
+	movw	$0, %ax
+	addl	%edx, %eax
+	movl	%eax, -1452(%rbp)
+	movl	-1436(%rbp), %eax
+	imull	$1858048, %eax, %edx
+	movl	-1444(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -1448(%rbp)
+	sarl	$20, -1456(%rbp)
+	sarl	$20, -1452(%rbp)
+	sarl	$20, -1448(%rbp)
+	movl	-1456(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L1123
+	cmpl	$0, -1456(%rbp)
+	jns	.L1124
+	movl	$0, -1456(%rbp)
+	jmp	.L1123
+.L1124:
+	movl	$255, -1456(%rbp)
+.L1123:
+	movl	-1452(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L1125
+	cmpl	$0, -1452(%rbp)
+	jns	.L1126
+	movl	$0, -1452(%rbp)
+	jmp	.L1125
+.L1126:
+	movl	$255, -1452(%rbp)
+.L1125:
+	movl	-1448(%rbp), %eax
+	cmpl	$255, %eax
+	jbe	.L1127
+	cmpl	$0, -1448(%rbp)
+	jns	.L1128
+	movl	$0, -1448(%rbp)
+	jmp	.L1127
+.L1128:
+	movl	$255, -1448(%rbp)
+.L1127:
+	movl	-1456(%rbp), %eax
+	movl	%eax, %edx
+	movq	-1592(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-1592(%rbp), %rax
+	addq	$1, %rax
+	movl	-1452(%rbp), %edx
+	movb	%dl, (%rax)
+	movq	-1592(%rbp), %rax
+	addq	$2, %rax
+	movl	-1448(%rbp), %edx
+	movb	%dl, (%rax)
+	movq	-1592(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	movl	-1624(%rbp), %eax
+	cltq
+	addq	%rax, -1592(%rbp)
+	addl	$1, -1460(%rbp)
+.L1122:
+	movl	-1460(%rbp), %eax
+	cmpl	-1620(%rbp), %eax
+	jl	.L1129
+	nop
+	nop
+	addq	$1488, %rsp
+	popq	%rbx
+	popq	%r12
+	popq	%r13
+	popq	%r14
+	popq	%r15
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4981:
+	.size	stbi__YCbCr_to_RGB_simd, .-stbi__YCbCr_to_RGB_simd
+	.type	stbi__setup_jpeg, @function
+stbi__setup_jpeg:
+.LFB4982:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$8, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	leaq	stbi__idct_block(%rip), %rdx
+	movq	%rdx, 18544(%rax)
+	movq	-8(%rbp), %rax
+	leaq	stbi__YCbCr_to_RGB_row(%rip), %rdx
+	movq	%rdx, 18552(%rax)
+	movq	-8(%rbp), %rax
+	leaq	stbi__resample_row_hv_2(%rip), %rdx
+	movq	%rdx, 18560(%rax)
+	call	stbi__sse2_available
+	testl	%eax, %eax
+	je	.L1132
+	movq	-8(%rbp), %rax
+	leaq	stbi__idct_simd(%rip), %rdx
+	movq	%rdx, 18544(%rax)
+	movq	-8(%rbp), %rax
+	leaq	stbi__YCbCr_to_RGB_simd(%rip), %rdx
+	movq	%rdx, 18552(%rax)
+	movq	-8(%rbp), %rax
+	leaq	stbi__resample_row_hv_2_simd(%rip), %rdx
+	movq	%rdx, 18560(%rax)
+.L1132:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4982:
+	.size	stbi__setup_jpeg, .-stbi__setup_jpeg
+	.type	stbi__cleanup_jpeg, @function
+stbi__cleanup_jpeg:
+.LFB4983:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %ecx
+	movq	-8(%rbp), %rax
+	movl	$0, %edx
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__free_jpeg_components
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4983:
+	.size	stbi__cleanup_jpeg, .-stbi__cleanup_jpeg
+	.type	stbi__blinn_8x8, @function
+stbi__blinn_8x8:
+.LFB4984:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, %edx
+	movl	%esi, %eax
+	movb	%dl, -20(%rbp)
+	movb	%al, -24(%rbp)
+	movzbl	-20(%rbp), %edx
+	movzbl	-24(%rbp), %eax
+	imull	%edx, %eax
+	subl	$-128, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	shrl	$8, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+	shrl	$8, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4984:
+	.size	stbi__blinn_8x8, .-stbi__blinn_8x8
+	.type	load_jpeg_image, @function
+load_jpeg_image:
+.LFB4985:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$392, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -360(%rbp)
+	movq	%rsi, -368(%rbp)
+	movq	%rdx, -376(%rbp)
+	movq	%rcx, -384(%rbp)
+	movl	%r8d, -388(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	$0, 8(%rax)
+	cmpl	$0, -388(%rbp)
+	js	.L1137
+	cmpl	$4, -388(%rbp)
+	jle	.L1138
+.L1137:
+	movl	$0, %eax
+	jmp	.L1139
+.L1138:
+	movq	-360(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__decode_jpeg_image
+	testl	%eax, %eax
+	jne	.L1140
+	movq	-360(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__cleanup_jpeg
+	movl	$0, %eax
+	jmp	.L1139
+.L1140:
+	cmpl	$0, -388(%rbp)
+	jne	.L1141
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$2, %eax
+	jle	.L1142
+	movl	$3, %eax
+	jmp	.L1144
+.L1142:
+	movl	$1, %eax
+	jmp	.L1144
+.L1141:
+	movl	-388(%rbp), %eax
+.L1144:
+	movl	%eax, -316(%rbp)
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$3, %eax
+	jne	.L1145
+	movq	-360(%rbp), %rax
+	movl	18512(%rax), %eax
+	cmpl	$3, %eax
+	je	.L1146
+	movq	-360(%rbp), %rax
+	movl	18508(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1145
+	movq	-360(%rbp), %rax
+	movl	18504(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1145
+.L1146:
+	movl	$1, %eax
+	jmp	.L1147
+.L1145:
+	movl	$0, %eax
+.L1147:
+	movl	%eax, -312(%rbp)
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$3, %eax
+	jne	.L1148
+	cmpl	$2, -316(%rbp)
+	jg	.L1148
+	cmpl	$0, -312(%rbp)
+	jne	.L1148
+	movl	$1, -332(%rbp)
+	jmp	.L1149
+.L1148:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	movl	%eax, -332(%rbp)
+.L1149:
+	cmpl	$0, -332(%rbp)
+	jg	.L1150
+	movq	-360(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__cleanup_jpeg
+	movl	$0, %eax
+	jmp	.L1139
+.L1150:
+	movq	$0, -256(%rbp)
+	movq	$0, -248(%rbp)
+	movq	$0, -240(%rbp)
+	movq	$0, -232(%rbp)
+	movl	$0, -328(%rbp)
+	jmp	.L1151
+.L1159:
+	leaq	-224(%rbp), %rcx
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$4, %rax
+	addq	%rcx, %rax
+	movq	%rax, -264(%rbp)
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	addl	$3, %eax
+	movl	%eax, %eax
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, %rcx
+	movq	-360(%rbp), %rsi
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18152, %rax
+	movq	%rcx, (%rax)
+	movq	-360(%rbp), %rcx
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18152, %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	jne	.L1152
+	movq	-360(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__cleanup_jpeg
+	movl	$0, %eax
+	jmp	.L1139
+.L1152:
+	movq	-360(%rbp), %rax
+	movl	18056(%rax), %ecx
+	movq	-360(%rbp), %rsi
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18084, %rax
+	movl	(%rax), %ebx
+	movl	%ecx, %eax
+	cltd
+	idivl	%ebx
+	movl	%eax, %edx
+	movq	-264(%rbp), %rax
+	movl	%edx, 24(%rax)
+	movq	-360(%rbp), %rax
+	movl	18060(%rax), %ecx
+	movq	-360(%rbp), %rsi
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18088, %rax
+	movl	(%rax), %ebx
+	movl	%ecx, %eax
+	cltd
+	idivl	%ebx
+	movl	%eax, %edx
+	movq	-264(%rbp), %rax
+	movl	%edx, 28(%rax)
+	movq	-264(%rbp), %rax
+	movl	28(%rax), %eax
+	sarl	%eax
+	movl	%eax, %edx
+	movq	-264(%rbp), %rax
+	movl	%edx, 36(%rax)
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %edx
+	movq	-264(%rbp), %rax
+	movl	24(%rax), %eax
+	addl	%edx, %eax
+	leal	-1(%rax), %edx
+	movq	-264(%rbp), %rax
+	movl	24(%rax), %eax
+	movl	%eax, %ecx
+	movl	%edx, %eax
+	movl	$0, %edx
+	divl	%ecx
+	movl	%eax, %edx
+	movq	-264(%rbp), %rax
+	movl	%edx, 32(%rax)
+	movq	-264(%rbp), %rax
+	movl	$0, 40(%rax)
+	movq	-360(%rbp), %rcx
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rcx, %rax
+	addq	$18128, %rax
+	movq	(%rax), %rdx
+	movq	-264(%rbp), %rax
+	movq	%rdx, 16(%rax)
+	movq	-264(%rbp), %rax
+	movq	16(%rax), %rdx
+	movq	-264(%rbp), %rax
+	movq	%rdx, 8(%rax)
+	movq	-264(%rbp), %rax
+	movl	24(%rax), %eax
+	cmpl	$1, %eax
+	jne	.L1154
+	movq	-264(%rbp), %rax
+	movl	28(%rax), %eax
+	cmpl	$1, %eax
+	jne	.L1154
+	movq	-264(%rbp), %rax
+	leaq	resample_row_1(%rip), %rdx
+	movq	%rdx, (%rax)
+	jmp	.L1155
+.L1154:
+	movq	-264(%rbp), %rax
+	movl	24(%rax), %eax
+	cmpl	$1, %eax
+	jne	.L1156
+	movq	-264(%rbp), %rax
+	movl	28(%rax), %eax
+	cmpl	$2, %eax
+	jne	.L1156
+	movq	-264(%rbp), %rax
+	leaq	stbi__resample_row_v_2(%rip), %rdx
+	movq	%rdx, (%rax)
+	jmp	.L1155
+.L1156:
+	movq	-264(%rbp), %rax
+	movl	24(%rax), %eax
+	cmpl	$2, %eax
+	jne	.L1157
+	movq	-264(%rbp), %rax
+	movl	28(%rax), %eax
+	cmpl	$1, %eax
+	jne	.L1157
+	movq	-264(%rbp), %rax
+	leaq	stbi__resample_row_h_2(%rip), %rdx
+	movq	%rdx, (%rax)
+	jmp	.L1155
+.L1157:
+	movq	-264(%rbp), %rax
+	movl	24(%rax), %eax
+	cmpl	$2, %eax
+	jne	.L1158
+	movq	-264(%rbp), %rax
+	movl	28(%rax), %eax
+	cmpl	$2, %eax
+	jne	.L1158
+	movq	-360(%rbp), %rax
+	movq	18560(%rax), %rdx
+	movq	-264(%rbp), %rax
+	movq	%rdx, (%rax)
+	jmp	.L1155
+.L1158:
+	movq	-264(%rbp), %rax
+	leaq	stbi__resample_row_generic(%rip), %rdx
+	movq	%rdx, (%rax)
+.L1155:
+	addl	$1, -328(%rbp)
+.L1151:
+	movl	-328(%rbp), %eax
+	cmpl	-332(%rbp), %eax
+	jl	.L1159
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %esi
+	movl	-316(%rbp), %eax
+	movl	$1, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -296(%rbp)
+	cmpq	$0, -296(%rbp)
+	jne	.L1160
+	movq	-360(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__cleanup_jpeg
+	movl	$0, %eax
+	jmp	.L1139
+.L1160:
+	movl	$0, -320(%rbp)
+	jmp	.L1161
+.L1201:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %edx
+	movl	-316(%rbp), %eax
+	imull	%edx, %eax
+	imull	-320(%rbp), %eax
+	movl	%eax, %edx
+	movq	-296(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -304(%rbp)
+	movl	$0, -328(%rbp)
+	jmp	.L1162
+.L1168:
+	leaq	-224(%rbp), %rcx
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$4, %rax
+	addq	%rcx, %rax
+	movq	%rax, -272(%rbp)
+	movq	-272(%rbp), %rax
+	movl	36(%rax), %edx
+	movq	-272(%rbp), %rax
+	movl	28(%rax), %eax
+	sarl	%eax
+	cmpl	%eax, %edx
+	setge	%al
+	movzbl	%al, %eax
+	movl	%eax, -308(%rbp)
+	movq	-272(%rbp), %rax
+	movq	(%rax), %r9
+	movq	-272(%rbp), %rax
+	movl	24(%rax), %r10d
+	movq	-272(%rbp), %rax
+	movl	32(%rax), %edi
+	cmpl	$0, -308(%rbp)
+	je	.L1163
+	movq	-272(%rbp), %rax
+	movq	8(%rax), %rdx
+	jmp	.L1164
+.L1163:
+	movq	-272(%rbp), %rax
+	movq	16(%rax), %rdx
+.L1164:
+	cmpl	$0, -308(%rbp)
+	je	.L1165
+	movq	-272(%rbp), %rax
+	movq	16(%rax), %rsi
+	jmp	.L1166
+.L1165:
+	movq	-272(%rbp), %rax
+	movq	8(%rax), %rsi
+.L1166:
+	movq	-360(%rbp), %r8
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rcx
+	movq	%rcx, %rax
+	addq	%rax, %rax
+	addq	%rcx, %rax
+	salq	$5, %rax
+	addq	%r8, %rax
+	addq	$18152, %rax
+	movq	(%rax), %rax
+	movl	%r10d, %r8d
+	movl	%edi, %ecx
+	movq	%rax, %rdi
+	call	*%r9
+	movl	-328(%rbp), %edx
+	movslq	%edx, %rdx
+	movq	%rax, -256(%rbp,%rdx,8)
+	movq	-272(%rbp), %rax
+	movl	36(%rax), %eax
+	leal	1(%rax), %edx
+	movq	-272(%rbp), %rax
+	movl	%edx, 36(%rax)
+	movq	-272(%rbp), %rax
+	movl	36(%rax), %edx
+	movq	-272(%rbp), %rax
+	movl	28(%rax), %eax
+	cmpl	%eax, %edx
+	jl	.L1167
+	movq	-272(%rbp), %rax
+	movl	$0, 36(%rax)
+	movq	-272(%rbp), %rax
+	movq	16(%rax), %rdx
+	movq	-272(%rbp), %rax
+	movq	%rdx, 8(%rax)
+	movq	-272(%rbp), %rax
+	movl	40(%rax), %eax
+	leal	1(%rax), %edx
+	movq	-272(%rbp), %rax
+	movl	%edx, 40(%rax)
+	movq	-272(%rbp), %rax
+	movl	40(%rax), %ecx
+	movq	-360(%rbp), %rsi
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18112, %rax
+	movl	(%rax), %eax
+	cmpl	%eax, %ecx
+	jge	.L1167
+	movq	-272(%rbp), %rax
+	movq	16(%rax), %rcx
+	movq	-360(%rbp), %rsi
+	movl	-328(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$5, %rax
+	addq	%rsi, %rax
+	addq	$18116, %rax
+	movl	(%rax), %eax
+	cltq
+	leaq	(%rcx,%rax), %rdx
+	movq	-272(%rbp), %rax
+	movq	%rdx, 16(%rax)
+.L1167:
+	addl	$1, -328(%rbp)
+.L1162:
+	movl	-328(%rbp), %eax
+	cmpl	-332(%rbp), %eax
+	jl	.L1168
+	cmpl	$2, -316(%rbp)
+	jle	.L1169
+	movq	-256(%rbp), %rax
+	movq	%rax, -280(%rbp)
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$3, %eax
+	jne	.L1170
+	cmpl	$0, -312(%rbp)
+	je	.L1171
+	movl	$0, -324(%rbp)
+	jmp	.L1172
+.L1173:
+	movl	-324(%rbp), %edx
+	movq	-280(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %edx
+	movq	-304(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-248(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movq	-304(%rbp), %rdx
+	addq	$1, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-240(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movq	-304(%rbp), %rdx
+	addq	$2, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-304(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	movl	-316(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+	addl	$1, -324(%rbp)
+.L1172:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1173
+	jmp	.L1174
+.L1171:
+	movq	-360(%rbp), %rax
+	movq	18552(%rax), %r10
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %r8d
+	movq	-240(%rbp), %rcx
+	movq	-248(%rbp), %rdx
+	movl	-316(%rbp), %edi
+	movq	-280(%rbp), %rsi
+	movq	-304(%rbp), %rax
+	movl	%edi, %r9d
+	movq	%rax, %rdi
+	call	*%r10
+	jmp	.L1174
+.L1170:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$4, %eax
+	jne	.L1175
+	movq	-360(%rbp), %rax
+	movl	18508(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1176
+	movl	$0, -324(%rbp)
+	jmp	.L1177
+.L1178:
+	movq	-232(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -333(%rbp)
+	movzbl	-333(%rbp), %edx
+	movq	-256(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movq	-304(%rbp), %rdx
+	movb	%al, (%rdx)
+	movzbl	-333(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movq	-304(%rbp), %rcx
+	leaq	1(%rcx), %rbx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, (%rbx)
+	movzbl	-333(%rbp), %edx
+	movq	-240(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movq	-304(%rbp), %rcx
+	leaq	2(%rcx), %rbx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, (%rbx)
+	movq	-304(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	movl	-316(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+	addl	$1, -324(%rbp)
+.L1177:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1178
+	jmp	.L1174
+.L1176:
+	movq	-360(%rbp), %rax
+	movl	18508(%rax), %eax
+	cmpl	$2, %eax
+	jne	.L1179
+	movq	-360(%rbp), %rax
+	movq	18552(%rax), %r10
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %r8d
+	movq	-240(%rbp), %rcx
+	movq	-248(%rbp), %rdx
+	movl	-316(%rbp), %edi
+	movq	-280(%rbp), %rsi
+	movq	-304(%rbp), %rax
+	movl	%edi, %r9d
+	movq	%rax, %rdi
+	call	*%r10
+	movl	$0, -324(%rbp)
+	jmp	.L1180
+.L1181:
+	movq	-232(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -334(%rbp)
+	movzbl	-334(%rbp), %edx
+	movq	-304(%rbp), %rax
+	movzbl	(%rax), %eax
+	notl	%eax
+	movzbl	%al, %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movq	-304(%rbp), %rdx
+	movb	%al, (%rdx)
+	movzbl	-334(%rbp), %edx
+	movq	-304(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	notl	%eax
+	movzbl	%al, %eax
+	movq	-304(%rbp), %rcx
+	leaq	1(%rcx), %rbx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, (%rbx)
+	movzbl	-334(%rbp), %edx
+	movq	-304(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	notl	%eax
+	movzbl	%al, %eax
+	movq	-304(%rbp), %rcx
+	leaq	2(%rcx), %rbx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, (%rbx)
+	movl	-316(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+	addl	$1, -324(%rbp)
+.L1180:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1181
+	jmp	.L1174
+.L1179:
+	movq	-360(%rbp), %rax
+	movq	18552(%rax), %r10
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %r8d
+	movq	-240(%rbp), %rcx
+	movq	-248(%rbp), %rdx
+	movl	-316(%rbp), %edi
+	movq	-280(%rbp), %rsi
+	movq	-304(%rbp), %rax
+	movl	%edi, %r9d
+	movq	%rax, %rdi
+	call	*%r10
+	jmp	.L1174
+.L1175:
+	movl	$0, -324(%rbp)
+	jmp	.L1182
+.L1183:
+	movl	-324(%rbp), %edx
+	movq	-280(%rbp), %rax
+	addq	%rdx, %rax
+	movq	-304(%rbp), %rdx
+	addq	$2, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-304(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rdx), %edx
+	movb	%dl, (%rax)
+	movzbl	(%rax), %edx
+	movq	-304(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-304(%rbp), %rax
+	addq	$3, %rax
+	movb	$-1, (%rax)
+	movl	-316(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+	addl	$1, -324(%rbp)
+.L1182:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1183
+	jmp	.L1174
+.L1169:
+	cmpl	$0, -312(%rbp)
+	je	.L1184
+	cmpl	$1, -316(%rbp)
+	jne	.L1185
+	movl	$0, -324(%rbp)
+	jmp	.L1186
+.L1187:
+	movq	-240(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-248(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-256(%rbp), %rsi
+	movl	-324(%rbp), %eax
+	addq	%rsi, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movq	-304(%rbp), %rbx
+	leaq	1(%rbx), %rsi
+	movq	%rsi, -304(%rbp)
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movb	%al, (%rbx)
+	addl	$1, -324(%rbp)
+.L1186:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1187
+	jmp	.L1174
+.L1185:
+	movl	$0, -324(%rbp)
+	jmp	.L1188
+.L1189:
+	movq	-240(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-248(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-256(%rbp), %rsi
+	movl	-324(%rbp), %eax
+	addq	%rsi, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movq	-304(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-304(%rbp), %rax
+	addq	$1, %rax
+	movb	$-1, (%rax)
+	addl	$1, -324(%rbp)
+	addq	$2, -304(%rbp)
+.L1188:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1189
+	jmp	.L1174
+.L1184:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$4, %eax
+	jne	.L1190
+	movq	-360(%rbp), %rax
+	movl	18508(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1190
+	movl	$0, -324(%rbp)
+	jmp	.L1191
+.L1192:
+	movq	-232(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -338(%rbp)
+	movzbl	-338(%rbp), %edx
+	movq	-256(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, -337(%rbp)
+	movzbl	-338(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, -336(%rbp)
+	movzbl	-338(%rbp), %edx
+	movq	-240(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movb	%al, -335(%rbp)
+	movzbl	-335(%rbp), %edx
+	movzbl	-336(%rbp), %ecx
+	movzbl	-337(%rbp), %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__compute_y
+	movq	-304(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-304(%rbp), %rax
+	addq	$1, %rax
+	movb	$-1, (%rax)
+	movl	-316(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+	addl	$1, -324(%rbp)
+.L1191:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1192
+	jmp	.L1174
+.L1190:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$4, %eax
+	jne	.L1193
+	movq	-360(%rbp), %rax
+	movl	18508(%rax), %eax
+	cmpl	$2, %eax
+	jne	.L1193
+	movl	$0, -324(%rbp)
+	jmp	.L1194
+.L1195:
+	movq	-232(%rbp), %rdx
+	movl	-324(%rbp), %eax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-256(%rbp), %rcx
+	movl	-324(%rbp), %eax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	notl	%eax
+	movzbl	%al, %eax
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__blinn_8x8
+	movq	-304(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-304(%rbp), %rax
+	addq	$1, %rax
+	movb	$-1, (%rax)
+	movl	-316(%rbp), %eax
+	cltq
+	addq	%rax, -304(%rbp)
+	addl	$1, -324(%rbp)
+.L1194:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1195
+	jmp	.L1174
+.L1193:
+	movq	-256(%rbp), %rax
+	movq	%rax, -288(%rbp)
+	cmpl	$1, -316(%rbp)
+	jne	.L1196
+	movl	$0, -324(%rbp)
+	jmp	.L1197
+.L1198:
+	movl	-324(%rbp), %edx
+	movq	-288(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-324(%rbp), %ecx
+	movq	-304(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	addl	$1, -324(%rbp)
+.L1197:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1198
+	jmp	.L1174
+.L1196:
+	movl	$0, -324(%rbp)
+	jmp	.L1199
+.L1200:
+	movl	-324(%rbp), %edx
+	movq	-288(%rbp), %rax
+	leaq	(%rdx,%rax), %rcx
+	movq	-304(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -304(%rbp)
+	movzbl	(%rcx), %edx
+	movb	%dl, (%rax)
+	movq	-304(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -304(%rbp)
+	movb	$-1, (%rax)
+	addl	$1, -324(%rbp)
+.L1199:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -324(%rbp)
+	jb	.L1200
+.L1174:
+	addl	$1, -320(%rbp)
+.L1161:
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, -320(%rbp)
+	jb	.L1201
+	movq	-360(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__cleanup_jpeg
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-368(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-376(%rbp), %rax
+	movl	%edx, (%rax)
+	cmpq	$0, -384(%rbp)
+	je	.L1202
+	movq	-360(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$2, %eax
+	jle	.L1203
+	movl	$3, %edx
+	jmp	.L1204
+.L1203:
+	movl	$1, %edx
+.L1204:
+	movq	-384(%rbp), %rax
+	movl	%edx, (%rax)
+.L1202:
+	movq	-296(%rbp), %rax
+.L1139:
+	movq	-24(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1205
+	call	__stack_chk_fail@PLT
+.L1205:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4985:
+	.size	load_jpeg_image, .-load_jpeg_image
+	.type	stbi__jpeg_load, @function
+stbi__jpeg_load:
+.LFB4986:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	%r9, -64(%rbp)
+	movl	$18568, %edi
+	call	stbi__malloc
+	movq	%rax, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	jne	.L1207
+	movl	$0, %eax
+	jmp	.L1208
+.L1207:
+	movq	-16(%rbp), %rax
+	movl	$18568, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-16(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	%rdx, (%rax)
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__setup_jpeg
+	movl	-52(%rbp), %edi
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-16(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	load_jpeg_image
+	movq	%rax, -8(%rbp)
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+.L1208:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4986:
+	.size	stbi__jpeg_load, .-stbi__jpeg_load
+	.type	stbi__jpeg_test, @function
+stbi__jpeg_test:
+.LFB4987:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	$18568, %edi
+	call	stbi__malloc
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L1210
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1211
+.L1210:
+	movq	-8(%rbp), %rax
+	movl	$18568, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-8(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	%rdx, (%rax)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__setup_jpeg
+	movq	-8(%rbp), %rax
+	movl	$1, %esi
+	movq	%rax, %rdi
+	call	stbi__decode_jpeg_header
+	movl	%eax, -12(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	-12(%rbp), %eax
+.L1211:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4987:
+	.size	stbi__jpeg_test, .-stbi__jpeg_test
+	.type	stbi__jpeg_info_raw, @function
+stbi__jpeg_info_raw:
+.LFB4988:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	%rcx, -32(%rbp)
+	movq	-8(%rbp), %rax
+	movl	$2, %esi
+	movq	%rax, %rdi
+	call	stbi__decode_jpeg_header
+	testl	%eax, %eax
+	jne	.L1213
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1214
+.L1213:
+	cmpq	$0, -16(%rbp)
+	je	.L1215
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movl	%edx, (%rax)
+.L1215:
+	cmpq	$0, -24(%rbp)
+	je	.L1216
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, (%rax)
+.L1216:
+	cmpq	$0, -32(%rbp)
+	je	.L1217
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	cmpl	$2, %eax
+	jle	.L1218
+	movl	$3, %edx
+	jmp	.L1219
+.L1218:
+	movl	$1, %edx
+.L1219:
+	movq	-32(%rbp), %rax
+	movl	%edx, (%rax)
+.L1217:
+	movl	$1, %eax
+.L1214:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4988:
+	.size	stbi__jpeg_info_raw, .-stbi__jpeg_info_raw
+	.type	stbi__jpeg_info, @function
+stbi__jpeg_info:
+.LFB4989:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	$18568, %edi
+	call	stbi__malloc
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L1221
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1222
+.L1221:
+	movq	-8(%rbp), %rax
+	movl	$18568, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-8(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	%rdx, (%rax)
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_info_raw
+	movl	%eax, -12(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	-12(%rbp), %eax
+.L1222:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4989:
+	.size	stbi__jpeg_info, .-stbi__jpeg_info
+	.type	stbi__bitreverse16, @function
+stbi__bitreverse16:
+.LFB4990:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	sarl	%eax
+	andl	$21845, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%eax, %eax
+	andl	$43690, %eax
+	orl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	sarl	$2, %eax
+	andl	$13107, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	sall	$2, %eax
+	andl	$52428, %eax
+	orl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	sarl	$4, %eax
+	andl	$3855, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	sall	$4, %eax
+	andl	$61680, %eax
+	orl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	sarl	$8, %eax
+	movzbl	%al, %edx
+	movl	-4(%rbp), %eax
+	sall	$8, %eax
+	movzwl	%ax, %eax
+	orl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4990:
+	.size	stbi__bitreverse16, .-stbi__bitreverse16
+	.section	.rodata
+.LC42:
+	.string	"bits <= 16"
+	.text
+	.type	stbi__bit_reverse, @function
+stbi__bit_reverse:
+.LFB4991:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	cmpl	$16, -8(%rbp)
+	jle	.L1226
+	leaq	__PRETTY_FUNCTION__.12(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4118, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC42(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1226:
+	movl	-4(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__bitreverse16
+	movl	%eax, %edx
+	movl	$16, %eax
+	subl	-8(%rbp), %eax
+	movl	%eax, %ecx
+	sarl	%cl, %edx
+	movl	%edx, %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4991:
+	.size	stbi__bit_reverse, .-stbi__bit_reverse
+	.section	.rodata
+.LC43:
+	.string	"bad sizes"
+.LC44:
+	.string	"bad codelengths"
+	.text
+	.type	stbi__zbuild_huffman, @function
+stbi__zbuild_huffman:
+.LFB4992:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$208, %rsp
+	movq	%rdi, -184(%rbp)
+	movq	%rsi, -192(%rbp)
+	movl	%edx, -196(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	$0, -164(%rbp)
+	leaq	-80(%rbp), %rax
+	movl	$68, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-184(%rbp), %rax
+	movl	$1024, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	$0, -168(%rbp)
+	jmp	.L1229
+.L1230:
+	movl	-168(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-192(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movslq	%eax, %rdx
+	movl	-80(%rbp,%rdx,4), %edx
+	addl	$1, %edx
+	cltq
+	movl	%edx, -80(%rbp,%rax,4)
+	addl	$1, -168(%rbp)
+.L1229:
+	movl	-168(%rbp), %eax
+	cmpl	-196(%rbp), %eax
+	jl	.L1230
+	movl	$0, -80(%rbp)
+	movl	$1, -168(%rbp)
+	jmp	.L1231
+.L1234:
+	movl	-168(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %edx
+	movl	-168(%rbp), %eax
+	movl	$1, %esi
+	movl	%eax, %ecx
+	sall	%cl, %esi
+	movl	%esi, %eax
+	cmpl	%eax, %edx
+	jle	.L1232
+	leaq	.LC43(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1244
+.L1232:
+	addl	$1, -168(%rbp)
+.L1231:
+	cmpl	$15, -168(%rbp)
+	jle	.L1234
+	movl	$0, -160(%rbp)
+	movl	$1, -168(%rbp)
+	jmp	.L1235
+.L1237:
+	movl	-168(%rbp), %eax
+	cltq
+	movl	-160(%rbp), %edx
+	movl	%edx, -144(%rbp,%rax,4)
+	movl	-160(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-184(%rbp), %rax
+	movl	-168(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$512, %rdx
+	movw	%cx, (%rax,%rdx,2)
+	movl	-164(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-184(%rbp), %rax
+	movl	-168(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$560, %rdx
+	movw	%cx, 4(%rax,%rdx,2)
+	movl	-168(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %eax
+	addl	%eax, -160(%rbp)
+	movl	-168(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %eax
+	testl	%eax, %eax
+	je	.L1236
+	movl	-168(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	cmpl	%eax, -160(%rbp)
+	jle	.L1236
+	leaq	.LC44(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1244
+.L1236:
+	movl	$16, %eax
+	subl	-168(%rbp), %eax
+	movl	-160(%rbp), %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %ecx
+	movq	-184(%rbp), %rax
+	movl	-168(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$264, %rdx
+	movl	%ecx, (%rax,%rdx,4)
+	sall	-160(%rbp)
+	movl	-168(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %eax
+	addl	%eax, -164(%rbp)
+	addl	$1, -168(%rbp)
+.L1235:
+	cmpl	$15, -168(%rbp)
+	jle	.L1237
+	movq	-184(%rbp), %rax
+	movl	$65536, 1120(%rax)
+	movl	$0, -168(%rbp)
+	jmp	.L1238
+.L1243:
+	movl	-168(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-192(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -152(%rbp)
+	cmpl	$0, -152(%rbp)
+	je	.L1239
+	movl	-152(%rbp), %eax
+	cltq
+	movl	-144(%rbp,%rax,4), %edx
+	movq	-184(%rbp), %rax
+	movl	-152(%rbp), %ecx
+	movslq	%ecx, %rcx
+	addq	$512, %rcx
+	movzwl	(%rax,%rcx,2), %eax
+	movzwl	%ax, %eax
+	movl	%edx, %ecx
+	subl	%eax, %ecx
+	movq	-184(%rbp), %rax
+	movl	-152(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$560, %rdx
+	movzwl	4(%rax,%rdx,2), %eax
+	movzwl	%ax, %eax
+	addl	%ecx, %eax
+	movl	%eax, -148(%rbp)
+	movl	-152(%rbp), %eax
+	sall	$9, %eax
+	movl	%eax, %edx
+	movl	-168(%rbp), %eax
+	orl	%edx, %eax
+	movw	%ax, -170(%rbp)
+	movl	-152(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-184(%rbp), %rdx
+	movl	-148(%rbp), %eax
+	cltq
+	movb	%cl, 1156(%rdx,%rax)
+	movl	-168(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-184(%rbp), %rax
+	movl	-148(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$720, %rdx
+	movw	%cx, 4(%rax,%rdx,2)
+	cmpl	$9, -152(%rbp)
+	jg	.L1240
+	movl	-152(%rbp), %eax
+	cltq
+	movl	-144(%rbp,%rax,4), %eax
+	movl	-152(%rbp), %edx
+	movl	%edx, %esi
+	movl	%eax, %edi
+	call	stbi__bit_reverse
+	movl	%eax, -156(%rbp)
+	jmp	.L1241
+.L1242:
+	movq	-184(%rbp), %rax
+	movl	-156(%rbp), %edx
+	movslq	%edx, %rdx
+	movzwl	-170(%rbp), %ecx
+	movw	%cx, (%rax,%rdx,2)
+	movl	-152(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	addl	%eax, -156(%rbp)
+.L1241:
+	cmpl	$511, -156(%rbp)
+	jle	.L1242
+.L1240:
+	movl	-152(%rbp), %eax
+	cltq
+	movl	-144(%rbp,%rax,4), %eax
+	leal	1(%rax), %edx
+	movl	-152(%rbp), %eax
+	cltq
+	movl	%edx, -144(%rbp,%rax,4)
+.L1239:
+	addl	$1, -168(%rbp)
+.L1238:
+	movl	-168(%rbp), %eax
+	cmpl	-196(%rbp), %eax
+	jl	.L1243
+	movl	$1, %eax
+.L1244:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1245
+	call	__stack_chk_fail@PLT
+.L1245:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4992:
+	.size	stbi__zbuild_huffman, .-stbi__zbuild_huffman
+	.type	stbi__zeof, @function
+stbi__zeof:
+.LFB4993:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	8(%rax), %rax
+	cmpq	%rax, %rdx
+	setnb	%al
+	movzbl	%al, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4993:
+	.size	stbi__zeof, .-stbi__zeof
+	.type	stbi__zget8, @function
+stbi__zget8:
+.LFB4994:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$8, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zeof
+	testl	%eax, %eax
+	jne	.L1249
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	leaq	1(%rax), %rcx
+	movq	-8(%rbp), %rdx
+	movq	%rcx, (%rdx)
+	movzbl	(%rax), %eax
+	jmp	.L1251
+.L1249:
+	movl	$0, %eax
+.L1251:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4994:
+	.size	stbi__zget8, .-stbi__zget8
+	.type	stbi__fill_bits, @function
+stbi__fill_bits:
+.LFB4995:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$8, %rsp
+	movq	%rdi, -8(%rbp)
+.L1255:
+	movq	-8(%rbp), %rax
+	movl	24(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	16(%rax), %eax
+	movl	%eax, %ecx
+	shrl	%cl, %edx
+	movl	%edx, %eax
+	testl	%eax, %eax
+	je	.L1253
+	movq	-8(%rbp), %rax
+	movq	8(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, (%rax)
+	jmp	.L1252
+.L1253:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zget8
+	movzbl	%al, %edx
+	movq	-8(%rbp), %rax
+	movl	16(%rax), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-8(%rbp), %rax
+	movl	24(%rax), %eax
+	orl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 24(%rax)
+	movq	-8(%rbp), %rax
+	movl	16(%rax), %eax
+	leal	8(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 16(%rax)
+	movq	-8(%rbp), %rax
+	movl	16(%rax), %eax
+	cmpl	$24, %eax
+	jle	.L1255
+.L1252:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4995:
+	.size	stbi__fill_bits, .-stbi__fill_bits
+	.type	stbi__zreceive, @function
+stbi__zreceive:
+.LFB4996:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	movq	-24(%rbp), %rax
+	movl	16(%rax), %eax
+	cmpl	%eax, -28(%rbp)
+	jle	.L1257
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__fill_bits
+.L1257:
+	movq	-24(%rbp), %rax
+	movl	24(%rax), %edx
+	movl	-28(%rbp), %eax
+	movl	$1, %esi
+	movl	%eax, %ecx
+	sall	%cl, %esi
+	movl	%esi, %eax
+	subl	$1, %eax
+	andl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	24(%rax), %edx
+	movl	-28(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 24(%rax)
+	movq	-24(%rbp), %rax
+	movl	16(%rax), %eax
+	subl	-28(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 16(%rax)
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4996:
+	.size	stbi__zreceive, .-stbi__zreceive
+	.type	stbi__zhuffman_decode_slowpath, @function
+stbi__zhuffman_decode_slowpath:
+.LFB4997:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	-24(%rbp), %rax
+	movl	24(%rax), %eax
+	movl	$16, %esi
+	movl	%eax, %edi
+	call	stbi__bit_reverse
+	movl	%eax, -8(%rbp)
+	movl	$10, -12(%rbp)
+.L1262:
+	movq	-32(%rbp), %rax
+	movl	-12(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$264, %rdx
+	movl	(%rax,%rdx,4), %eax
+	cmpl	%eax, -8(%rbp)
+	jl	.L1268
+	addl	$1, -12(%rbp)
+	jmp	.L1262
+.L1268:
+	nop
+	cmpl	$15, -12(%rbp)
+	jle	.L1263
+	movl	$-1, %eax
+	jmp	.L1264
+.L1263:
+	movl	$16, %eax
+	subl	-12(%rbp), %eax
+	movl	-8(%rbp), %edx
+	movl	%eax, %ecx
+	sarl	%cl, %edx
+	movl	%edx, %ecx
+	movq	-32(%rbp), %rax
+	movl	-12(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$512, %rdx
+	movzwl	(%rax,%rdx,2), %eax
+	movzwl	%ax, %eax
+	subl	%eax, %ecx
+	movq	-32(%rbp), %rax
+	movl	-12(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$560, %rdx
+	movzwl	4(%rax,%rdx,2), %eax
+	movzwl	%ax, %eax
+	addl	%ecx, %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$287, -4(%rbp)
+	jle	.L1265
+	movl	$-1, %eax
+	jmp	.L1264
+.L1265:
+	movq	-32(%rbp), %rdx
+	movl	-4(%rbp), %eax
+	cltq
+	movzbl	1156(%rdx,%rax), %eax
+	movzbl	%al, %eax
+	cmpl	%eax, -12(%rbp)
+	je	.L1266
+	movl	$-1, %eax
+	jmp	.L1264
+.L1266:
+	movq	-24(%rbp), %rax
+	movl	24(%rax), %edx
+	movl	-12(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 24(%rax)
+	movq	-24(%rbp), %rax
+	movl	16(%rax), %eax
+	subl	-12(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 16(%rax)
+	movq	-32(%rbp), %rax
+	movl	-4(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$720, %rdx
+	movzwl	4(%rax,%rdx,2), %eax
+	movzwl	%ax, %eax
+.L1264:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4997:
+	.size	stbi__zhuffman_decode_slowpath, .-stbi__zhuffman_decode_slowpath
+	.type	stbi__zhuffman_decode, @function
+stbi__zhuffman_decode:
+.LFB4998:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	-24(%rbp), %rax
+	movl	16(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L1270
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zeof
+	testl	%eax, %eax
+	je	.L1271
+	movq	-24(%rbp), %rax
+	movl	20(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1272
+	movq	-24(%rbp), %rax
+	movl	$1, 20(%rax)
+	movq	-24(%rbp), %rax
+	movl	16(%rax), %eax
+	leal	16(%rax), %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 16(%rax)
+	jmp	.L1270
+.L1272:
+	movl	$-1, %eax
+	jmp	.L1273
+.L1271:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__fill_bits
+.L1270:
+	movq	-24(%rbp), %rax
+	movl	24(%rax), %eax
+	andl	$511, %eax
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, %edx
+	movzwl	(%rax,%rdx,2), %eax
+	movzwl	%ax, %eax
+	movl	%eax, -8(%rbp)
+	cmpl	$0, -8(%rbp)
+	je	.L1274
+	movl	-8(%rbp), %eax
+	sarl	$9, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movl	24(%rax), %edx
+	movl	-4(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 24(%rax)
+	movq	-24(%rbp), %rax
+	movl	16(%rax), %eax
+	subl	-4(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 16(%rax)
+	movl	-8(%rbp), %eax
+	andl	$511, %eax
+	jmp	.L1273
+.L1274:
+	movq	-32(%rbp), %rdx
+	movq	-24(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zhuffman_decode_slowpath
+.L1273:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4998:
+	.size	stbi__zhuffman_decode, .-stbi__zhuffman_decode
+	.section	.rodata
+.LC45:
+	.string	"output buffer limit"
+	.text
+	.type	stbi__zexpand, @function
+stbi__zexpand:
+.LFB4999:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movl	%edx, -52(%rbp)
+	movq	-40(%rbp), %rax
+	movq	-48(%rbp), %rdx
+	movq	%rdx, 32(%rax)
+	movq	-40(%rbp), %rax
+	movl	56(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1276
+	leaq	.LC45(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1277
+.L1276:
+	movq	-40(%rbp), %rax
+	movq	32(%rax), %rdx
+	movq	-40(%rbp), %rax
+	movq	40(%rax), %rax
+	subq	%rax, %rdx
+	movl	%edx, -16(%rbp)
+	movq	-40(%rbp), %rax
+	movq	48(%rax), %rdx
+	movq	-40(%rbp), %rax
+	movq	40(%rax), %rax
+	subq	%rax, %rdx
+	movl	%edx, -12(%rbp)
+	movl	-12(%rbp), %eax
+	movl	%eax, -20(%rbp)
+	movl	-16(%rbp), %eax
+	notl	%eax
+	movl	%eax, %edx
+	movl	-52(%rbp), %eax
+	cmpl	%eax, %edx
+	jnb	.L1279
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1277
+.L1281:
+	movl	-20(%rbp), %eax
+	testl	%eax, %eax
+	jns	.L1280
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1277
+.L1280:
+	sall	-20(%rbp)
+.L1279:
+	movl	-52(%rbp), %edx
+	movl	-16(%rbp), %eax
+	addl	%edx, %eax
+	cmpl	%eax, -20(%rbp)
+	jb	.L1281
+	movl	-20(%rbp), %edx
+	movq	-40(%rbp), %rax
+	movq	40(%rax), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	realloc@PLT
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L1282
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1277
+.L1282:
+	movq	-40(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	movq	%rdx, 40(%rax)
+	movl	-16(%rbp), %edx
+	movq	-8(%rbp), %rax
+	addq	%rax, %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, 32(%rax)
+	movl	-20(%rbp), %edx
+	movq	-8(%rbp), %rax
+	addq	%rax, %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, 48(%rax)
+	movl	$1, %eax
+.L1277:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE4999:
+	.size	stbi__zexpand, .-stbi__zexpand
+	.section	.rodata
+	.align 32
+	.type	stbi__zlength_base, @object
+	.size	stbi__zlength_base, 124
+stbi__zlength_base:
+	.long	3
+	.long	4
+	.long	5
+	.long	6
+	.long	7
+	.long	8
+	.long	9
+	.long	10
+	.long	11
+	.long	13
+	.long	15
+	.long	17
+	.long	19
+	.long	23
+	.long	27
+	.long	31
+	.long	35
+	.long	43
+	.long	51
+	.long	59
+	.long	67
+	.long	83
+	.long	99
+	.long	115
+	.long	131
+	.long	163
+	.long	195
+	.long	227
+	.long	258
+	.long	0
+	.long	0
+	.align 32
+	.type	stbi__zlength_extra, @object
+	.size	stbi__zlength_extra, 124
+stbi__zlength_extra:
+	.long	0
+	.long	0
+	.long	0
+	.long	0
+	.long	0
+	.long	0
+	.long	0
+	.long	0
+	.long	1
+	.long	1
+	.long	1
+	.long	1
+	.long	2
+	.long	2
+	.long	2
+	.long	2
+	.long	3
+	.long	3
+	.long	3
+	.long	3
+	.long	4
+	.long	4
+	.long	4
+	.long	4
+	.long	5
+	.long	5
+	.long	5
+	.long	5
+	.long	0
+	.long	0
+	.long	0
+	.align 32
+	.type	stbi__zdist_base, @object
+	.size	stbi__zdist_base, 128
+stbi__zdist_base:
+	.long	1
+	.long	2
+	.long	3
+	.long	4
+	.long	5
+	.long	7
+	.long	9
+	.long	13
+	.long	17
+	.long	25
+	.long	33
+	.long	49
+	.long	65
+	.long	97
+	.long	129
+	.long	193
+	.long	257
+	.long	385
+	.long	513
+	.long	769
+	.long	1025
+	.long	1537
+	.long	2049
+	.long	3073
+	.long	4097
+	.long	6145
+	.long	8193
+	.long	12289
+	.long	16385
+	.long	24577
+	.long	0
+	.long	0
+	.align 32
+	.type	stbi__zdist_extra, @object
+	.size	stbi__zdist_extra, 128
+stbi__zdist_extra:
+	.long	0
+	.long	0
+	.long	0
+	.long	0
+	.long	1
+	.long	1
+	.long	2
+	.long	2
+	.long	3
+	.long	3
+	.long	4
+	.long	4
+	.long	5
+	.long	5
+	.long	6
+	.long	6
+	.long	7
+	.long	7
+	.long	8
+	.long	8
+	.long	9
+	.long	9
+	.long	10
+	.long	10
+	.long	11
+	.long	11
+	.long	12
+	.long	12
+	.long	13
+	.long	13
+	.zero	8
+.LC46:
+	.string	"unexpected end"
+.LC47:
+	.string	"bad dist"
+	.text
+	.type	stbi__parse_huffman_block, @function
+stbi__parse_huffman_block:
+.LFB5000:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movq	32(%rax), %rax
+	movq	%rax, -16(%rbp)
+.L1303:
+	movq	-40(%rbp), %rax
+	leaq	60(%rax), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zhuffman_decode
+	movl	%eax, -20(%rbp)
+	cmpl	$255, -20(%rbp)
+	jg	.L1284
+	cmpl	$0, -20(%rbp)
+	jns	.L1285
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1286
+.L1285:
+	movq	-40(%rbp), %rax
+	movq	48(%rax), %rax
+	cmpq	%rax, -16(%rbp)
+	jb	.L1287
+	movq	-16(%rbp), %rcx
+	movq	-40(%rbp), %rax
+	movl	$1, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zexpand
+	testl	%eax, %eax
+	jne	.L1288
+	movl	$0, %eax
+	jmp	.L1286
+.L1288:
+	movq	-40(%rbp), %rax
+	movq	32(%rax), %rax
+	movq	%rax, -16(%rbp)
+.L1287:
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -16(%rbp)
+	movl	-20(%rbp), %edx
+	movb	%dl, (%rax)
+	jmp	.L1303
+.L1284:
+	cmpl	$256, -20(%rbp)
+	jne	.L1290
+	movq	-40(%rbp), %rax
+	movq	-16(%rbp), %rdx
+	movq	%rdx, 32(%rax)
+	movq	-40(%rbp), %rax
+	movl	20(%rax), %eax
+	testl	%eax, %eax
+	je	.L1291
+	movq	-40(%rbp), %rax
+	movl	16(%rax), %eax
+	cmpl	$15, %eax
+	jg	.L1291
+	leaq	.LC46(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1286
+.L1291:
+	movl	$1, %eax
+	jmp	.L1286
+.L1290:
+	cmpl	$285, -20(%rbp)
+	jle	.L1292
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1286
+.L1292:
+	subl	$257, -20(%rbp)
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__zlength_base(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	movl	%eax, -28(%rbp)
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__zlength_extra(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	testl	%eax, %eax
+	je	.L1293
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__zlength_extra(%rip), %rax
+	movl	(%rdx,%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	movl	-28(%rbp), %edx
+	addl	%edx, %eax
+	movl	%eax, -28(%rbp)
+.L1293:
+	movq	-40(%rbp), %rax
+	leaq	2080(%rax), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zhuffman_decode
+	movl	%eax, -20(%rbp)
+	cmpl	$0, -20(%rbp)
+	js	.L1294
+	cmpl	$29, -20(%rbp)
+	jle	.L1295
+.L1294:
+	leaq	.LC12(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1286
+.L1295:
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__zdist_base(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	movl	%eax, -24(%rbp)
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__zdist_extra(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	testl	%eax, %eax
+	je	.L1296
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	stbi__zdist_extra(%rip), %rax
+	movl	(%rdx,%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	movl	-24(%rbp), %edx
+	addl	%edx, %eax
+	movl	%eax, -24(%rbp)
+.L1296:
+	movq	-40(%rbp), %rax
+	movq	40(%rax), %rax
+	movq	-16(%rbp), %rdx
+	subq	%rax, %rdx
+	movl	-24(%rbp), %eax
+	cltq
+	cmpq	%rax, %rdx
+	jge	.L1297
+	leaq	.LC47(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1286
+.L1297:
+	movl	-28(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-40(%rbp), %rax
+	movq	48(%rax), %rax
+	subq	-16(%rbp), %rax
+	cmpq	%rax, %rdx
+	jle	.L1298
+	movl	-28(%rbp), %edx
+	movq	-16(%rbp), %rcx
+	movq	-40(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zexpand
+	testl	%eax, %eax
+	jne	.L1299
+	movl	$0, %eax
+	jmp	.L1286
+.L1299:
+	movq	-40(%rbp), %rax
+	movq	32(%rax), %rax
+	movq	%rax, -16(%rbp)
+.L1298:
+	movl	-24(%rbp), %eax
+	cltq
+	negq	%rax
+	movq	%rax, %rdx
+	movq	-16(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -8(%rbp)
+	cmpl	$1, -24(%rbp)
+	jne	.L1300
+	movq	-8(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, -29(%rbp)
+	cmpl	$0, -28(%rbp)
+	je	.L1303
+.L1301:
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -16(%rbp)
+	movzbl	-29(%rbp), %edx
+	movb	%dl, (%rax)
+	subl	$1, -28(%rbp)
+	cmpl	$0, -28(%rbp)
+	jne	.L1301
+	jmp	.L1303
+.L1300:
+	cmpl	$0, -28(%rbp)
+	je	.L1303
+.L1302:
+	movq	-8(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -8(%rbp)
+	movzbl	(%rax), %ecx
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -16(%rbp)
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	subl	$1, -28(%rbp)
+	cmpl	$0, -28(%rbp)
+	jne	.L1302
+	jmp	.L1303
+.L1286:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5000:
+	.size	stbi__parse_huffman_block, .-stbi__parse_huffman_block
+	.type	stbi__compute_huffman_codes, @function
+stbi__compute_huffman_codes:
+.LFB5001:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$2592, %rsp
+	movq	%rdi, -2584(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-2584(%rbp), %rax
+	movl	$5, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	addl	$257, %eax
+	movl	%eax, -2548(%rbp)
+	movq	-2584(%rbp), %rax
+	movl	$5, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	addl	$1, %eax
+	movl	%eax, -2544(%rbp)
+	movq	-2584(%rbp), %rax
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	addl	$4, %eax
+	movl	%eax, -2540(%rbp)
+	movl	-2548(%rbp), %edx
+	movl	-2544(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, -2536(%rbp)
+	leaq	-496(%rbp), %rax
+	movl	$19, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	$0, -2560(%rbp)
+	jmp	.L1305
+.L1306:
+	movq	-2584(%rbp), %rax
+	movl	$3, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	movl	%eax, -2532(%rbp)
+	movl	-2560(%rbp), %eax
+	cltq
+	leaq	length_dezigzag.11(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	movl	-2532(%rbp), %edx
+	cltq
+	movb	%dl, -496(%rbp,%rax)
+	addl	$1, -2560(%rbp)
+.L1305:
+	movl	-2560(%rbp), %eax
+	cmpl	-2540(%rbp), %eax
+	jl	.L1306
+	leaq	-496(%rbp), %rcx
+	leaq	-2528(%rbp), %rax
+	movl	$19, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zbuild_huffman
+	testl	%eax, %eax
+	jne	.L1307
+	movl	$0, %eax
+	jmp	.L1323
+.L1307:
+	movl	$0, -2556(%rbp)
+	jmp	.L1309
+.L1319:
+	leaq	-2528(%rbp), %rdx
+	movq	-2584(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zhuffman_decode
+	movl	%eax, -2552(%rbp)
+	cmpl	$0, -2552(%rbp)
+	js	.L1310
+	cmpl	$18, -2552(%rbp)
+	jle	.L1311
+.L1310:
+	leaq	.LC44(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1323
+.L1311:
+	cmpl	$15, -2552(%rbp)
+	jg	.L1312
+	movl	-2556(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -2556(%rbp)
+	movl	-2552(%rbp), %edx
+	cltq
+	movb	%dl, -464(%rbp,%rax)
+	jmp	.L1309
+.L1312:
+	movb	$0, -2561(%rbp)
+	cmpl	$16, -2552(%rbp)
+	jne	.L1313
+	movq	-2584(%rbp), %rax
+	movl	$2, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	addl	$3, %eax
+	movl	%eax, -2552(%rbp)
+	cmpl	$0, -2556(%rbp)
+	jne	.L1314
+	leaq	.LC44(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1323
+.L1314:
+	movl	-2556(%rbp), %eax
+	subl	$1, %eax
+	cltq
+	movzbl	-464(%rbp,%rax), %eax
+	movb	%al, -2561(%rbp)
+	jmp	.L1315
+.L1313:
+	cmpl	$17, -2552(%rbp)
+	jne	.L1316
+	movq	-2584(%rbp), %rax
+	movl	$3, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	addl	$3, %eax
+	movl	%eax, -2552(%rbp)
+	jmp	.L1315
+.L1316:
+	cmpl	$18, -2552(%rbp)
+	jne	.L1317
+	movq	-2584(%rbp), %rax
+	movl	$7, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	addl	$11, %eax
+	movl	%eax, -2552(%rbp)
+	jmp	.L1315
+.L1317:
+	leaq	.LC44(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1323
+.L1315:
+	movl	-2536(%rbp), %eax
+	subl	-2556(%rbp), %eax
+	cmpl	%eax, -2552(%rbp)
+	jle	.L1318
+	leaq	.LC44(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1323
+.L1318:
+	movl	-2552(%rbp), %eax
+	movslq	%eax, %rdx
+	movzbl	-2561(%rbp), %eax
+	movl	-2556(%rbp), %ecx
+	movslq	%ecx, %rcx
+	leaq	-464(%rbp), %rsi
+	addq	%rsi, %rcx
+	movl	%eax, %esi
+	movq	%rcx, %rdi
+	call	memset@PLT
+	movl	-2552(%rbp), %eax
+	addl	%eax, -2556(%rbp)
+.L1309:
+	movl	-2556(%rbp), %eax
+	cmpl	-2536(%rbp), %eax
+	jl	.L1319
+	movl	-2556(%rbp), %eax
+	cmpl	-2536(%rbp), %eax
+	je	.L1320
+	leaq	.LC44(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1323
+.L1320:
+	movq	-2584(%rbp), %rax
+	leaq	60(%rax), %rcx
+	movl	-2548(%rbp), %edx
+	leaq	-464(%rbp), %rax
+	movq	%rax, %rsi
+	movq	%rcx, %rdi
+	call	stbi__zbuild_huffman
+	testl	%eax, %eax
+	jne	.L1321
+	movl	$0, %eax
+	jmp	.L1323
+.L1321:
+	movl	-2548(%rbp), %eax
+	cltq
+	leaq	-464(%rbp), %rdx
+	leaq	(%rdx,%rax), %rsi
+	movq	-2584(%rbp), %rax
+	leaq	2080(%rax), %rcx
+	movl	-2544(%rbp), %eax
+	movl	%eax, %edx
+	movq	%rcx, %rdi
+	call	stbi__zbuild_huffman
+	testl	%eax, %eax
+	jne	.L1322
+	movl	$0, %eax
+	jmp	.L1323
+.L1322:
+	movl	$1, %eax
+.L1323:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1324
+	call	__stack_chk_fail@PLT
+.L1324:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5001:
+	.size	stbi__compute_huffman_codes, .-stbi__compute_huffman_codes
+	.section	.rodata
+.LC48:
+	.string	"zlib corrupt"
+.LC49:
+	.string	"read past buffer"
+	.text
+	.type	stbi__parse_uncompressed_block, @function
+stbi__parse_uncompressed_block:
+.LFB5002:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$56, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -56(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movq	-56(%rbp), %rax
+	movl	16(%rax), %eax
+	andl	$7, %eax
+	testl	%eax, %eax
+	je	.L1326
+	movq	-56(%rbp), %rax
+	movl	16(%rax), %eax
+	andl	$7, %eax
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+.L1326:
+	movl	$0, -40(%rbp)
+	jmp	.L1327
+.L1328:
+	movq	-56(%rbp), %rax
+	movl	24(%rax), %ecx
+	movl	-40(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -40(%rbp)
+	movl	%ecx, %edx
+	cltq
+	movb	%dl, -28(%rbp,%rax)
+	movq	-56(%rbp), %rax
+	movl	24(%rax), %eax
+	shrl	$8, %eax
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 24(%rax)
+	movq	-56(%rbp), %rax
+	movl	16(%rax), %eax
+	leal	-8(%rax), %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, 16(%rax)
+.L1327:
+	movq	-56(%rbp), %rax
+	movl	16(%rax), %eax
+	testl	%eax, %eax
+	jg	.L1328
+	movq	-56(%rbp), %rax
+	movl	16(%rax), %eax
+	testl	%eax, %eax
+	jns	.L1331
+	leaq	.LC48(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1336
+.L1332:
+	movl	-40(%rbp), %ebx
+	leal	1(%rbx), %eax
+	movl	%eax, -40(%rbp)
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zget8
+	movslq	%ebx, %rdx
+	movb	%al, -28(%rbp,%rdx)
+.L1331:
+	cmpl	$3, -40(%rbp)
+	jle	.L1332
+	movzbl	-27(%rbp), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movzbl	-28(%rbp), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	movl	%eax, -36(%rbp)
+	movzbl	-25(%rbp), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movzbl	-26(%rbp), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	movl	%eax, -32(%rbp)
+	movl	-36(%rbp), %eax
+	xorl	$65535, %eax
+	cmpl	%eax, -32(%rbp)
+	je	.L1333
+	leaq	.LC48(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1336
+.L1333:
+	movq	-56(%rbp), %rax
+	movq	(%rax), %rdx
+	movl	-36(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-56(%rbp), %rax
+	movq	8(%rax), %rax
+	cmpq	%rdx, %rax
+	jnb	.L1334
+	leaq	.LC49(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1336
+.L1334:
+	movq	-56(%rbp), %rax
+	movq	32(%rax), %rdx
+	movl	-36(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-56(%rbp), %rax
+	movq	48(%rax), %rax
+	cmpq	%rdx, %rax
+	jnb	.L1335
+	movq	-56(%rbp), %rax
+	movq	32(%rax), %rcx
+	movl	-36(%rbp), %edx
+	movq	-56(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zexpand
+	testl	%eax, %eax
+	jne	.L1335
+	movl	$0, %eax
+	jmp	.L1336
+.L1335:
+	movl	-36(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	movq	(%rax), %rcx
+	movq	-56(%rbp), %rax
+	movq	32(%rax), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movq	-56(%rbp), %rax
+	movq	(%rax), %rdx
+	movl	-36(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-56(%rbp), %rax
+	movq	%rdx, (%rax)
+	movq	-56(%rbp), %rax
+	movq	32(%rax), %rdx
+	movl	-36(%rbp), %eax
+	cltq
+	addq	%rax, %rdx
+	movq	-56(%rbp), %rax
+	movq	%rdx, 32(%rax)
+	movl	$1, %eax
+.L1336:
+	movq	-24(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1337
+	call	__stack_chk_fail@PLT
+.L1337:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5002:
+	.size	stbi__parse_uncompressed_block, .-stbi__parse_uncompressed_block
+	.section	.rodata
+.LC50:
+	.string	"bad zlib header"
+.LC51:
+	.string	"no preset dict"
+.LC52:
+	.string	"bad compression"
+	.text
+	.type	stbi__parse_zlib_header, @function
+stbi__parse_zlib_header:
+.LFB5003:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$24, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zget8
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	movl	-12(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -8(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zget8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__zeof
+	testl	%eax, %eax
+	je	.L1339
+	leaq	.LC50(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1340
+.L1339:
+	movl	-12(%rbp), %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	imulq	$-2078209981, %rdx, %rdx
+	shrq	$32, %rdx
+	addl	%eax, %edx
+	sarl	$4, %edx
+	movl	%eax, %ecx
+	sarl	$31, %ecx
+	subl	%ecx, %edx
+	movl	%edx, %ecx
+	sall	$5, %ecx
+	subl	%edx, %ecx
+	subl	%ecx, %eax
+	movl	%eax, %edx
+	testl	%edx, %edx
+	je	.L1341
+	leaq	.LC50(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1340
+.L1341:
+	movl	-4(%rbp), %eax
+	andl	$32, %eax
+	testl	%eax, %eax
+	je	.L1342
+	leaq	.LC51(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1340
+.L1342:
+	cmpl	$8, -8(%rbp)
+	je	.L1343
+	leaq	.LC52(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1340
+.L1343:
+	movl	$1, %eax
+.L1340:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5003:
+	.size	stbi__parse_zlib_header, .-stbi__parse_zlib_header
+	.section	.rodata
+	.align 32
+	.type	stbi__zdefault_length, @object
+	.size	stbi__zdefault_length, 288
+stbi__zdefault_length:
+	.ascii	"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"
+	.ascii	"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"
+	.ascii	"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"
+	.ascii	"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b"
+	.ascii	"\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\t\t\t\t\t\t"
+	.ascii	"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
+	.ascii	"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
+	.ascii	"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
+	.ascii	"\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\007\007\007\007\007\007\007"
+	.ascii	"\007\007\007\007\007\007\007\007\007\007\007\007\007\007\007"
+	.ascii	"\007\007\b\b\b\b\b\b\b\b"
+	.align 32
+	.type	stbi__zdefault_distance, @object
+	.size	stbi__zdefault_distance, 32
+stbi__zdefault_distance:
+	.ascii	"\005\005\005\005\005\005\005\005\005\005\005\005\005\005\005"
+	.ascii	"\005\005\005\005\005\005\005\005\005\005\005\005\005\005\005"
+	.ascii	"\005\005"
+	.text
+	.type	stbi__parse_zlib, @function
+stbi__parse_zlib:
+.LFB5004:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	%esi, -28(%rbp)
+	cmpl	$0, -28(%rbp)
+	je	.L1345
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__parse_zlib_header
+	testl	%eax, %eax
+	jne	.L1345
+	movl	$0, %eax
+	jmp	.L1346
+.L1345:
+	movq	-24(%rbp), %rax
+	movl	$0, 16(%rax)
+	movq	-24(%rbp), %rax
+	movl	$0, 24(%rax)
+	movq	-24(%rbp), %rax
+	movl	$0, 20(%rax)
+.L1353:
+	movq	-24(%rbp), %rax
+	movl	$1, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	movl	%eax, -8(%rbp)
+	movq	-24(%rbp), %rax
+	movl	$2, %esi
+	movq	%rax, %rdi
+	call	stbi__zreceive
+	movl	%eax, -4(%rbp)
+	cmpl	$0, -4(%rbp)
+	jne	.L1347
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__parse_uncompressed_block
+	testl	%eax, %eax
+	jne	.L1348
+	movl	$0, %eax
+	jmp	.L1346
+.L1347:
+	cmpl	$3, -4(%rbp)
+	jne	.L1349
+	movl	$0, %eax
+	jmp	.L1346
+.L1349:
+	cmpl	$1, -4(%rbp)
+	jne	.L1350
+	movq	-24(%rbp), %rax
+	addq	$60, %rax
+	movl	$288, %edx
+	leaq	stbi__zdefault_length(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zbuild_huffman
+	testl	%eax, %eax
+	jne	.L1351
+	movl	$0, %eax
+	jmp	.L1346
+.L1351:
+	movq	-24(%rbp), %rax
+	addq	$2080, %rax
+	movl	$32, %edx
+	leaq	stbi__zdefault_distance(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__zbuild_huffman
+	testl	%eax, %eax
+	jne	.L1352
+	movl	$0, %eax
+	jmp	.L1346
+.L1350:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__compute_huffman_codes
+	testl	%eax, %eax
+	jne	.L1352
+	movl	$0, %eax
+	jmp	.L1346
+.L1352:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__parse_huffman_block
+	testl	%eax, %eax
+	jne	.L1348
+	movl	$0, %eax
+	jmp	.L1346
+.L1348:
+	cmpl	$0, -8(%rbp)
+	je	.L1353
+	movl	$1, %eax
+.L1346:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5004:
+	.size	stbi__parse_zlib, .-stbi__parse_zlib
+	.type	stbi__do_zlib, @function
+stbi__do_zlib:
+.LFB5005:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movl	%edx, -20(%rbp)
+	movl	%ecx, -24(%rbp)
+	movl	%r8d, -28(%rbp)
+	movq	-8(%rbp), %rax
+	movq	-16(%rbp), %rdx
+	movq	%rdx, 40(%rax)
+	movq	-8(%rbp), %rax
+	movq	-16(%rbp), %rdx
+	movq	%rdx, 32(%rax)
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-16(%rbp), %rax
+	addq	%rax, %rdx
+	movq	-8(%rbp), %rax
+	movq	%rdx, 48(%rax)
+	movq	-8(%rbp), %rax
+	movl	-24(%rbp), %edx
+	movl	%edx, 56(%rax)
+	movl	-28(%rbp), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__parse_zlib
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5005:
+	.size	stbi__do_zlib, .-stbi__do_zlib
+	.globl	stbi_zlib_decode_malloc_guesssize
+	.type	stbi_zlib_decode_malloc_guesssize, @function
+stbi_zlib_decode_malloc_guesssize:
+.LFB5006:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$4160, %rsp
+	movq	%rdi, -4136(%rbp)
+	movl	%esi, -4140(%rbp)
+	movl	%edx, -4144(%rbp)
+	movq	%rcx, -4152(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-4144(%rbp), %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -4120(%rbp)
+	cmpq	$0, -4120(%rbp)
+	jne	.L1357
+	movl	$0, %eax
+	jmp	.L1361
+.L1357:
+	movq	-4136(%rbp), %rax
+	movq	%rax, -4112(%rbp)
+	movl	-4140(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-4136(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -4104(%rbp)
+	movl	-4144(%rbp), %edx
+	movq	-4120(%rbp), %rsi
+	leaq	-4112(%rbp), %rax
+	movl	$1, %r8d
+	movl	$1, %ecx
+	movq	%rax, %rdi
+	call	stbi__do_zlib
+	testl	%eax, %eax
+	je	.L1359
+	cmpq	$0, -4152(%rbp)
+	je	.L1360
+	movq	-4080(%rbp), %rdx
+	movq	-4072(%rbp), %rax
+	subq	%rax, %rdx
+	movq	-4152(%rbp), %rax
+	movl	%edx, (%rax)
+.L1360:
+	movq	-4072(%rbp), %rax
+	jmp	.L1361
+.L1359:
+	movq	-4072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+.L1361:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1362
+	call	__stack_chk_fail@PLT
+.L1362:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5006:
+	.size	stbi_zlib_decode_malloc_guesssize, .-stbi_zlib_decode_malloc_guesssize
+	.globl	stbi_zlib_decode_malloc
+	.type	stbi_zlib_decode_malloc, @function
+stbi_zlib_decode_malloc:
+.LFB5007:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movl	%esi, -12(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	-24(%rbp), %rdx
+	movl	-12(%rbp), %esi
+	movq	-8(%rbp), %rax
+	movq	%rdx, %rcx
+	movl	$16384, %edx
+	movq	%rax, %rdi
+	call	stbi_zlib_decode_malloc_guesssize
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5007:
+	.size	stbi_zlib_decode_malloc, .-stbi_zlib_decode_malloc
+	.globl	stbi_zlib_decode_malloc_guesssize_headerflag
+	.type	stbi_zlib_decode_malloc_guesssize_headerflag, @function
+stbi_zlib_decode_malloc_guesssize_headerflag:
+.LFB5008:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$4160, %rsp
+	movq	%rdi, -4136(%rbp)
+	movl	%esi, -4140(%rbp)
+	movl	%edx, -4144(%rbp)
+	movq	%rcx, -4152(%rbp)
+	movl	%r8d, -4156(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-4144(%rbp), %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -4120(%rbp)
+	cmpq	$0, -4120(%rbp)
+	jne	.L1366
+	movl	$0, %eax
+	jmp	.L1370
+.L1366:
+	movq	-4136(%rbp), %rax
+	movq	%rax, -4112(%rbp)
+	movl	-4140(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-4136(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -4104(%rbp)
+	movl	-4156(%rbp), %ecx
+	movl	-4144(%rbp), %edx
+	movq	-4120(%rbp), %rsi
+	leaq	-4112(%rbp), %rax
+	movl	%ecx, %r8d
+	movl	$1, %ecx
+	movq	%rax, %rdi
+	call	stbi__do_zlib
+	testl	%eax, %eax
+	je	.L1368
+	cmpq	$0, -4152(%rbp)
+	je	.L1369
+	movq	-4080(%rbp), %rdx
+	movq	-4072(%rbp), %rax
+	subq	%rax, %rdx
+	movq	-4152(%rbp), %rax
+	movl	%edx, (%rax)
+.L1369:
+	movq	-4072(%rbp), %rax
+	jmp	.L1370
+.L1368:
+	movq	-4072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+.L1370:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1371
+	call	__stack_chk_fail@PLT
+.L1371:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5008:
+	.size	stbi_zlib_decode_malloc_guesssize_headerflag, .-stbi_zlib_decode_malloc_guesssize_headerflag
+	.globl	stbi_zlib_decode_buffer
+	.type	stbi_zlib_decode_buffer, @function
+stbi_zlib_decode_buffer:
+.LFB5009:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$4144, %rsp
+	movq	%rdi, -4120(%rbp)
+	movl	%esi, -4124(%rbp)
+	movq	%rdx, -4136(%rbp)
+	movl	%ecx, -4128(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-4136(%rbp), %rax
+	movq	%rax, -4112(%rbp)
+	movl	-4128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-4136(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -4104(%rbp)
+	movl	-4124(%rbp), %edx
+	movq	-4120(%rbp), %rsi
+	leaq	-4112(%rbp), %rax
+	movl	$1, %r8d
+	movl	$0, %ecx
+	movq	%rax, %rdi
+	call	stbi__do_zlib
+	testl	%eax, %eax
+	je	.L1373
+	movq	-4080(%rbp), %rdx
+	movq	-4072(%rbp), %rax
+	subq	%rax, %rdx
+	movl	%edx, %eax
+	jmp	.L1375
+.L1373:
+	movl	$-1, %eax
+.L1375:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1376
+	call	__stack_chk_fail@PLT
+.L1376:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5009:
+	.size	stbi_zlib_decode_buffer, .-stbi_zlib_decode_buffer
+	.globl	stbi_zlib_decode_noheader_malloc
+	.type	stbi_zlib_decode_noheader_malloc, @function
+stbi_zlib_decode_noheader_malloc:
+.LFB5010:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$4160, %rsp
+	movq	%rdi, -4136(%rbp)
+	movl	%esi, -4140(%rbp)
+	movq	%rdx, -4152(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	$16384, %edi
+	call	stbi__malloc
+	movq	%rax, -4120(%rbp)
+	cmpq	$0, -4120(%rbp)
+	jne	.L1378
+	movl	$0, %eax
+	jmp	.L1382
+.L1378:
+	movq	-4136(%rbp), %rax
+	movq	%rax, -4112(%rbp)
+	movl	-4140(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-4136(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -4104(%rbp)
+	movq	-4120(%rbp), %rsi
+	leaq	-4112(%rbp), %rax
+	movl	$0, %r8d
+	movl	$1, %ecx
+	movl	$16384, %edx
+	movq	%rax, %rdi
+	call	stbi__do_zlib
+	testl	%eax, %eax
+	je	.L1380
+	cmpq	$0, -4152(%rbp)
+	je	.L1381
+	movq	-4080(%rbp), %rdx
+	movq	-4072(%rbp), %rax
+	subq	%rax, %rdx
+	movq	-4152(%rbp), %rax
+	movl	%edx, (%rax)
+.L1381:
+	movq	-4072(%rbp), %rax
+	jmp	.L1382
+.L1380:
+	movq	-4072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+.L1382:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1383
+	call	__stack_chk_fail@PLT
+.L1383:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5010:
+	.size	stbi_zlib_decode_noheader_malloc, .-stbi_zlib_decode_noheader_malloc
+	.globl	stbi_zlib_decode_noheader_buffer
+	.type	stbi_zlib_decode_noheader_buffer, @function
+stbi_zlib_decode_noheader_buffer:
+.LFB5011:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$4144, %rsp
+	movq	%rdi, -4120(%rbp)
+	movl	%esi, -4124(%rbp)
+	movq	%rdx, -4136(%rbp)
+	movl	%ecx, -4128(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-4136(%rbp), %rax
+	movq	%rax, -4112(%rbp)
+	movl	-4128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-4136(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -4104(%rbp)
+	movl	-4124(%rbp), %edx
+	movq	-4120(%rbp), %rsi
+	leaq	-4112(%rbp), %rax
+	movl	$0, %r8d
+	movl	$0, %ecx
+	movq	%rax, %rdi
+	call	stbi__do_zlib
+	testl	%eax, %eax
+	je	.L1385
+	movq	-4080(%rbp), %rdx
+	movq	-4072(%rbp), %rax
+	subq	%rax, %rdx
+	movl	%edx, %eax
+	jmp	.L1387
+.L1385:
+	movl	$-1, %eax
+.L1387:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1388
+	call	__stack_chk_fail@PLT
+.L1388:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5011:
+	.size	stbi_zlib_decode_noheader_buffer, .-stbi_zlib_decode_noheader_buffer
+	.type	stbi__get_chunk_header, @function
+stbi__get_chunk_header:
+.LFB5012:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, -8(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, -4(%rbp)
+	movq	-8(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5012:
+	.size	stbi__get_chunk_header, .-stbi__get_chunk_header
+	.section	.rodata
+.LC53:
+	.string	"bad png sig"
+	.text
+	.type	stbi__check_png_header, @function
+stbi__check_png_header:
+.LFB5013:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L1392
+.L1395:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-4(%rbp), %edx
+	movslq	%edx, %rdx
+	leaq	png_sig.10(%rip), %rcx
+	movzbl	(%rdx,%rcx), %edx
+	cmpb	%dl, %al
+	je	.L1393
+	leaq	.LC53(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1394
+.L1393:
+	addl	$1, -4(%rbp)
+.L1392:
+	cmpl	$7, -4(%rbp)
+	jle	.L1395
+	movl	$1, %eax
+.L1394:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5013:
+	.size	stbi__check_png_header, .-stbi__check_png_header
+	.data
+	.type	first_row_filter, @object
+	.size	first_row_filter, 5
+first_row_filter:
+	.string	""
+	.string	"\001"
+	.ascii	"\005\001"
+	.text
+	.type	stbi__paeth, @function
+stbi__paeth:
+.LFB5014:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -36(%rbp)
+	movl	%esi, -40(%rbp)
+	movl	%edx, -44(%rbp)
+	movl	-44(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	leal	(%rax,%rdx), %ecx
+	movl	-36(%rbp), %edx
+	movl	-40(%rbp), %eax
+	addl	%eax, %edx
+	movl	%ecx, %eax
+	subl	%edx, %eax
+	movl	%eax, -20(%rbp)
+	movl	-40(%rbp), %edx
+	movl	-36(%rbp), %eax
+	cmpl	%eax, %edx
+	cmovle	%edx, %eax
+	movl	%eax, -16(%rbp)
+	movl	-36(%rbp), %edx
+	movl	-40(%rbp), %eax
+	cmpl	%eax, %edx
+	cmovge	%edx, %eax
+	movl	%eax, -12(%rbp)
+	movl	-12(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jg	.L1397
+	movl	-16(%rbp), %eax
+	jmp	.L1398
+.L1397:
+	movl	-44(%rbp), %eax
+.L1398:
+	movl	%eax, -8(%rbp)
+	movl	-20(%rbp), %eax
+	cmpl	-16(%rbp), %eax
+	jg	.L1399
+	movl	-12(%rbp), %eax
+	jmp	.L1400
+.L1399:
+	movl	-8(%rbp), %eax
+.L1400:
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5014:
+	.size	stbi__paeth, .-stbi__paeth
+	.section	.rodata
+	.align 8
+	.type	stbi__depth_scale_table, @object
+	.size	stbi__depth_scale_table, 9
+stbi__depth_scale_table:
+	.string	""
+	.string	"\377U"
+	.string	"\021"
+	.string	""
+	.string	""
+	.ascii	"\001"
+.LC54:
+	.string	"img_n == 3"
+	.text
+	.type	stbi__create_png_alpha_expand8, @function
+stbi__create_png_alpha_expand8:
+.LFB5015:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	%edx, -36(%rbp)
+	movl	%ecx, -40(%rbp)
+	cmpl	$1, -40(%rbp)
+	jne	.L1403
+	movl	-36(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -4(%rbp)
+	jmp	.L1404
+.L1405:
+	movl	-4(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$-1, (%rax)
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-4(%rbp), %edx
+	addl	%edx, %edx
+	movslq	%edx, %rcx
+	movq	-24(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	subl	$1, -4(%rbp)
+.L1404:
+	cmpl	$0, -4(%rbp)
+	jns	.L1405
+	jmp	.L1410
+.L1403:
+	cmpl	$3, -40(%rbp)
+	je	.L1407
+	leaq	__PRETTY_FUNCTION__.9(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4685, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC54(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1407:
+	movl	-36(%rbp), %eax
+	subl	$1, %eax
+	movl	%eax, -4(%rbp)
+	jmp	.L1408
+.L1409:
+	movl	-4(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	leaq	3(%rax), %rdx
+	movq	-24(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$-1, (%rax)
+	movl	-4(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%edx, %eax
+	cltq
+	leaq	2(%rax), %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-4(%rbp), %edx
+	sall	$2, %edx
+	movslq	%edx, %rdx
+	leaq	2(%rdx), %rcx
+	movq	-24(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-4(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%edx, %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-4(%rbp), %edx
+	sall	$2, %edx
+	movslq	%edx, %rdx
+	leaq	1(%rdx), %rcx
+	movq	-24(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-4(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-4(%rbp), %edx
+	sall	$2, %edx
+	movslq	%edx, %rcx
+	movq	-24(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	subl	$1, -4(%rbp)
+.L1408:
+	cmpl	$0, -4(%rbp)
+	jns	.L1409
+.L1410:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5015:
+	.size	stbi__create_png_alpha_expand8, .-stbi__create_png_alpha_expand8
+	.section	.rodata
+	.align 8
+.LC55:
+	.string	"out_n == s->img_n || out_n == s->img_n+1"
+.LC56:
+	.string	"not enough pixels"
+.LC57:
+	.string	"invalid filter"
+.LC58:
+	.string	"depth == 1"
+.LC59:
+	.string	"img_n+1 == out_n"
+	.text
+	.type	stbi__create_png_image_raw, @function
+stbi__create_png_image_raw:
+.LFB5016:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$184, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -168(%rbp)
+	movq	%rsi, -176(%rbp)
+	movl	%edx, -180(%rbp)
+	movl	%ecx, -184(%rbp)
+	movl	%r8d, -188(%rbp)
+	movl	%r9d, -192(%rbp)
+	cmpl	$16, 16(%rbp)
+	jne	.L1412
+	movl	$2, %eax
+	jmp	.L1413
+.L1412:
+	movl	$1, %eax
+.L1413:
+	movl	%eax, -116(%rbp)
+	movq	-168(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, -48(%rbp)
+	movl	-184(%rbp), %eax
+	imull	-188(%rbp), %eax
+	movl	%eax, %edx
+	movl	-116(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, -112(%rbp)
+	movl	$1, -136(%rbp)
+	movq	-48(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	%eax, -108(%rbp)
+	movl	-184(%rbp), %eax
+	imull	-116(%rbp), %eax
+	movl	%eax, -104(%rbp)
+	movl	-108(%rbp), %eax
+	imull	-116(%rbp), %eax
+	movl	%eax, -128(%rbp)
+	movl	-188(%rbp), %eax
+	movl	%eax, -124(%rbp)
+	movq	-48(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -184(%rbp)
+	je	.L1414
+	movq	-48(%rbp), %rax
+	movl	8(%rax), %eax
+	addl	$1, %eax
+	cmpl	%eax, -184(%rbp)
+	je	.L1414
+	leaq	__PRETTY_FUNCTION__.8(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4711, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC55(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1414:
+	movl	-192(%rbp), %esi
+	movl	-188(%rbp), %eax
+	movl	-104(%rbp), %edx
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	-168(%rbp), %rdx
+	movq	%rax, 24(%rdx)
+	movq	-168(%rbp), %rax
+	movq	24(%rax), %rax
+	testq	%rax, %rax
+	jne	.L1415
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1416
+.L1415:
+	movl	-188(%rbp), %esi
+	movl	16(%rbp), %edx
+	movl	-108(%rbp), %eax
+	movl	$7, %ecx
+	movl	%eax, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L1417
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1416
+.L1417:
+	movl	-108(%rbp), %eax
+	imull	-188(%rbp), %eax
+	movl	%eax, %edx
+	movl	16(%rbp), %eax
+	imull	%edx, %eax
+	addl	$7, %eax
+	shrl	$3, %eax
+	movl	%eax, -100(%rbp)
+	movl	-100(%rbp), %edx
+	movl	-192(%rbp), %ecx
+	movl	-100(%rbp), %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__mad2sizes_valid
+	testl	%eax, %eax
+	jne	.L1418
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1416
+.L1418:
+	movl	-100(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	-192(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, -96(%rbp)
+	movl	-180(%rbp), %eax
+	cmpl	-96(%rbp), %eax
+	jnb	.L1419
+	leaq	.LC56(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1416
+.L1419:
+	movl	-100(%rbp), %eax
+	movl	$0, %edx
+	movl	$2, %esi
+	movl	%eax, %edi
+	call	stbi__malloc_mad2
+	movq	%rax, -40(%rbp)
+	cmpq	$0, -40(%rbp)
+	jne	.L1420
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1416
+.L1420:
+	cmpl	$7, 16(%rbp)
+	jg	.L1421
+	movl	$1, -128(%rbp)
+	movl	-100(%rbp), %eax
+	movl	%eax, -124(%rbp)
+.L1421:
+	movl	$0, -140(%rbp)
+	jmp	.L1422
+.L1477:
+	movl	-140(%rbp), %eax
+	andl	$1, %eax
+	imull	-100(%rbp), %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -80(%rbp)
+	movl	-140(%rbp), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	sete	%al
+	movzbl	%al, %eax
+	imull	-100(%rbp), %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -32(%rbp)
+	movq	-168(%rbp), %rax
+	movq	24(%rax), %rdx
+	movl	-112(%rbp), %eax
+	imull	-140(%rbp), %eax
+	movl	%eax, %eax
+	addq	%rdx, %rax
+	movq	%rax, -24(%rbp)
+	movl	-124(%rbp), %eax
+	imull	-128(%rbp), %eax
+	movl	%eax, -92(%rbp)
+	movq	-176(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -176(%rbp)
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%eax, -120(%rbp)
+	cmpl	$4, -120(%rbp)
+	jle	.L1423
+	leaq	.LC57(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	movl	%eax, -136(%rbp)
+	jmp	.L1424
+.L1423:
+	cmpl	$0, -140(%rbp)
+	jne	.L1425
+	movl	-120(%rbp), %eax
+	cltq
+	leaq	first_row_filter(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	movzbl	%al, %eax
+	movl	%eax, -120(%rbp)
+.L1425:
+	cmpl	$5, -120(%rbp)
+	ja	.L1426
+	movl	-120(%rbp), %eax
+	leaq	0(,%rax,4), %rdx
+	leaq	.L1428(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	cltq
+	leaq	.L1428(%rip), %rdx
+	addq	%rdx, %rax
+	jmp	*%rax
+	.section	.rodata
+	.align 4
+	.align 4
+.L1428:
+	.long	.L1433-.L1428
+	.long	.L1432-.L1428
+	.long	.L1431-.L1428
+	.long	.L1430-.L1428
+	.long	.L1429-.L1428
+	.long	.L1427-.L1428
+	.text
+.L1433:
+	movl	-92(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rcx
+	movq	-80(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	jmp	.L1426
+.L1432:
+	movl	-128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rcx
+	movq	-80(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movl	-128(%rbp), %eax
+	movl	%eax, -132(%rbp)
+	jmp	.L1434
+.L1435:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %esi
+	movl	-132(%rbp), %eax
+	subl	-128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %ecx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rsi,%rcx), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1434:
+	movl	-132(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1435
+	jmp	.L1426
+.L1431:
+	movl	$0, -132(%rbp)
+	jmp	.L1436
+.L1437:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %esi
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %ecx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rsi,%rcx), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1436:
+	movl	-132(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1437
+	jmp	.L1426
+.L1430:
+	movl	$0, -132(%rbp)
+	jmp	.L1438
+.L1439:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %ecx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	shrb	%al
+	movl	%eax, %esi
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rcx,%rsi), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1438:
+	movl	-132(%rbp), %eax
+	cmpl	-128(%rbp), %eax
+	jl	.L1439
+	movl	-128(%rbp), %eax
+	movl	%eax, -132(%rbp)
+	jmp	.L1440
+.L1441:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %esi
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	-132(%rbp), %eax
+	subl	-128(%rbp), %eax
+	movslq	%eax, %rcx
+	movq	-80(%rbp), %rax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	sarl	%eax
+	movl	%eax, %ecx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rsi,%rcx), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1440:
+	movl	-132(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1441
+	jmp	.L1426
+.L1429:
+	movl	$0, -132(%rbp)
+	jmp	.L1442
+.L1443:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %esi
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %ecx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rsi,%rcx), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1442:
+	movl	-132(%rbp), %eax
+	cmpl	-128(%rbp), %eax
+	jl	.L1443
+	movl	-128(%rbp), %eax
+	movl	%eax, -132(%rbp)
+	jmp	.L1444
+.L1445:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %ebx
+	movl	-132(%rbp), %eax
+	subl	-128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rcx
+	movq	-32(%rbp), %rax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %ecx
+	movl	-132(%rbp), %eax
+	subl	-128(%rbp), %eax
+	movslq	%eax, %rsi
+	movq	-80(%rbp), %rax
+	addq	%rsi, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__paeth
+	movl	%eax, %ecx
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rbx,%rcx), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1444:
+	movl	-132(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1445
+	jmp	.L1426
+.L1427:
+	movl	-128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rcx
+	movq	-80(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	movl	-128(%rbp), %eax
+	movl	%eax, -132(%rbp)
+	jmp	.L1446
+.L1447:
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-176(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %ecx
+	movl	-132(%rbp), %eax
+	subl	-128(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	shrb	%al
+	movl	%eax, %esi
+	movl	-132(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leal	(%rcx,%rsi), %edx
+	movb	%dl, (%rax)
+	addl	$1, -132(%rbp)
+.L1446:
+	movl	-132(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1447
+	nop
+.L1426:
+	movl	-92(%rbp), %eax
+	cltq
+	addq	%rax, -176(%rbp)
+	cmpl	$7, 16(%rbp)
+	jg	.L1448
+	cmpl	$0, 24(%rbp)
+	jne	.L1449
+	movl	16(%rbp), %eax
+	cltq
+	leaq	stbi__depth_scale_table(%rip), %rdx
+	movzbl	(%rax,%rdx), %eax
+	jmp	.L1450
+.L1449:
+	movl	$1, %eax
+.L1450:
+	movb	%al, -145(%rbp)
+	movq	-80(%rbp), %rax
+	movq	%rax, -72(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, -64(%rbp)
+	movb	$0, -146(%rbp)
+	movl	-108(%rbp), %eax
+	movl	-188(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -84(%rbp)
+	cmpl	$4, 16(%rbp)
+	jne	.L1451
+	movl	$0, -144(%rbp)
+	jmp	.L1452
+.L1454:
+	movl	-144(%rbp), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	jne	.L1453
+	movq	-72(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -72(%rbp)
+	movzbl	(%rax), %eax
+	movb	%al, -146(%rbp)
+.L1453:
+	movzbl	-146(%rbp), %eax
+	shrb	$4, %al
+	movl	%eax, %ecx
+	movq	-64(%rbp), %rdx
+	leaq	1(%rdx), %rax
+	movq	%rax, -64(%rbp)
+	movl	%ecx, %eax
+	mulb	-145(%rbp)
+	movb	%al, (%rdx)
+	salb	$4, -146(%rbp)
+	addl	$1, -144(%rbp)
+.L1452:
+	movl	-144(%rbp), %eax
+	cmpl	-84(%rbp), %eax
+	jb	.L1454
+	jmp	.L1455
+.L1451:
+	cmpl	$2, 16(%rbp)
+	jne	.L1456
+	movl	$0, -144(%rbp)
+	jmp	.L1457
+.L1459:
+	movl	-144(%rbp), %eax
+	andl	$3, %eax
+	testl	%eax, %eax
+	jne	.L1458
+	movq	-72(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -72(%rbp)
+	movzbl	(%rax), %eax
+	movb	%al, -146(%rbp)
+.L1458:
+	movzbl	-146(%rbp), %eax
+	shrb	$6, %al
+	movl	%eax, %ecx
+	movq	-64(%rbp), %rdx
+	leaq	1(%rdx), %rax
+	movq	%rax, -64(%rbp)
+	movl	%ecx, %eax
+	mulb	-145(%rbp)
+	movb	%al, (%rdx)
+	salb	$2, -146(%rbp)
+	addl	$1, -144(%rbp)
+.L1457:
+	movl	-144(%rbp), %eax
+	cmpl	-84(%rbp), %eax
+	jb	.L1459
+	jmp	.L1455
+.L1456:
+	cmpl	$1, 16(%rbp)
+	je	.L1460
+	leaq	__PRETTY_FUNCTION__.8(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4811, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC58(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1460:
+	movl	$0, -144(%rbp)
+	jmp	.L1461
+.L1463:
+	movl	-144(%rbp), %eax
+	andl	$7, %eax
+	testl	%eax, %eax
+	jne	.L1462
+	movq	-72(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -72(%rbp)
+	movzbl	(%rax), %eax
+	movb	%al, -146(%rbp)
+.L1462:
+	movzbl	-146(%rbp), %eax
+	shrb	$7, %al
+	movl	%eax, %ecx
+	movq	-64(%rbp), %rdx
+	leaq	1(%rdx), %rax
+	movq	%rax, -64(%rbp)
+	movl	%ecx, %eax
+	mulb	-145(%rbp)
+	movb	%al, (%rdx)
+	salb	-146(%rbp)
+	addl	$1, -144(%rbp)
+.L1461:
+	movl	-144(%rbp), %eax
+	cmpl	-84(%rbp), %eax
+	jb	.L1463
+.L1455:
+	movl	-108(%rbp), %eax
+	cmpl	-184(%rbp), %eax
+	je	.L1464
+	movl	-108(%rbp), %ecx
+	movl	-188(%rbp), %edx
+	movq	-24(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__create_png_alpha_expand8
+	jmp	.L1464
+.L1448:
+	cmpl	$8, 16(%rbp)
+	jne	.L1465
+	movl	-108(%rbp), %eax
+	cmpl	-184(%rbp), %eax
+	jne	.L1466
+	movl	-108(%rbp), %eax
+	imull	-188(%rbp), %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rcx
+	movq	-24(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+	jmp	.L1464
+.L1466:
+	movl	-108(%rbp), %ecx
+	movl	-188(%rbp), %edx
+	movq	-80(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__create_png_alpha_expand8
+	jmp	.L1464
+.L1465:
+	cmpl	$16, 16(%rbp)
+	jne	.L1464
+	movq	-24(%rbp), %rax
+	movq	%rax, -56(%rbp)
+	movl	-108(%rbp), %eax
+	movl	-188(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -88(%rbp)
+	movl	-108(%rbp), %eax
+	cmpl	-184(%rbp), %eax
+	jne	.L1467
+	movl	$0, -144(%rbp)
+	jmp	.L1468
+.L1469:
+	movq	-80(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	orl	%edx, %eax
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movw	%dx, (%rax)
+	addl	$1, -144(%rbp)
+	addq	$2, -56(%rbp)
+	addq	$2, -80(%rbp)
+.L1468:
+	movl	-144(%rbp), %eax
+	cmpl	-88(%rbp), %eax
+	jb	.L1469
+	jmp	.L1464
+.L1467:
+	movl	-108(%rbp), %eax
+	addl	$1, %eax
+	cmpl	%eax, -184(%rbp)
+	je	.L1470
+	leaq	__PRETTY_FUNCTION__.8(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4836, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC59(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1470:
+	cmpl	$1, -108(%rbp)
+	jne	.L1471
+	movl	$0, -144(%rbp)
+	jmp	.L1472
+.L1473:
+	movq	-80(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	orl	%edx, %eax
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-56(%rbp), %rax
+	addq	$2, %rax
+	movw	$-1, (%rax)
+	addl	$1, -144(%rbp)
+	addq	$4, -56(%rbp)
+	addq	$2, -80(%rbp)
+.L1472:
+	movl	-144(%rbp), %eax
+	cmpl	-188(%rbp), %eax
+	jb	.L1473
+	jmp	.L1464
+.L1471:
+	cmpl	$3, -108(%rbp)
+	je	.L1474
+	leaq	__PRETTY_FUNCTION__.8(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4843, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC54(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1474:
+	movl	$0, -144(%rbp)
+	jmp	.L1475
+.L1476:
+	movq	-80(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	orl	%edx, %eax
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-80(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	orl	%eax, %edx
+	movq	-56(%rbp), %rax
+	addq	$2, %rax
+	movw	%dx, (%rax)
+	movq	-80(%rbp), %rax
+	addq	$4, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	addq	$5, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	orl	%eax, %edx
+	movq	-56(%rbp), %rax
+	addq	$4, %rax
+	movw	%dx, (%rax)
+	movq	-56(%rbp), %rax
+	addq	$6, %rax
+	movw	$-1, (%rax)
+	addl	$1, -144(%rbp)
+	addq	$8, -56(%rbp)
+	addq	$6, -80(%rbp)
+.L1475:
+	movl	-144(%rbp), %eax
+	cmpl	-188(%rbp), %eax
+	jb	.L1476
+.L1464:
+	addl	$1, -140(%rbp)
+.L1422:
+	movl	-140(%rbp), %eax
+	cmpl	-192(%rbp), %eax
+	jb	.L1477
+.L1424:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	cmpl	$0, -136(%rbp)
+	jne	.L1478
+	movl	$0, %eax
+	jmp	.L1416
+.L1478:
+	movl	$1, %eax
+.L1416:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5016:
+	.size	stbi__create_png_image_raw, .-stbi__create_png_image_raw
+	.type	stbi__create_png_image, @function
+stbi__create_png_image:
+.LFB5017:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$224, %rsp
+	movq	%rdi, -200(%rbp)
+	movq	%rsi, -208(%rbp)
+	movl	%edx, -212(%rbp)
+	movl	%ecx, -216(%rbp)
+	movl	%r8d, -220(%rbp)
+	movl	%r9d, -224(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	cmpl	$16, -220(%rbp)
+	jne	.L1480
+	movl	$2, %eax
+	jmp	.L1481
+.L1480:
+	movl	$1, %eax
+.L1481:
+	movl	%eax, -180(%rbp)
+	movl	-216(%rbp), %eax
+	imull	-180(%rbp), %eax
+	movl	%eax, -176(%rbp)
+	cmpl	$0, 16(%rbp)
+	jne	.L1482
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %r9d
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %r8d
+	movl	-216(%rbp), %ecx
+	movl	-212(%rbp), %edx
+	movq	-208(%rbp), %rsi
+	movq	-200(%rbp), %rax
+	movl	-224(%rbp), %edi
+	pushq	%rdi
+	movl	-220(%rbp), %edi
+	pushq	%rdi
+	movq	%rax, %rdi
+	call	stbi__create_png_image_raw
+	addq	$16, %rsp
+	jmp	.L1483
+.L1482:
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %esi
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edi
+	movl	-176(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edx
+	call	stbi__malloc_mad3
+	movq	%rax, -152(%rbp)
+	cmpq	$0, -152(%rbp)
+	jne	.L1484
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1483
+.L1484:
+	movl	$0, -192(%rbp)
+	jmp	.L1485
+.L1492:
+	movl	$0, -144(%rbp)
+	movl	$4, -140(%rbp)
+	movl	$0, -136(%rbp)
+	movl	$2, -132(%rbp)
+	movl	$0, -128(%rbp)
+	movl	$1, -124(%rbp)
+	movl	$0, -120(%rbp)
+	movl	$0, -112(%rbp)
+	movl	$0, -108(%rbp)
+	movl	$4, -104(%rbp)
+	movl	$0, -100(%rbp)
+	movl	$2, -96(%rbp)
+	movl	$0, -92(%rbp)
+	movl	$1, -88(%rbp)
+	movl	$8, -80(%rbp)
+	movl	$8, -76(%rbp)
+	movl	$4, -72(%rbp)
+	movl	$4, -68(%rbp)
+	movl	$2, -64(%rbp)
+	movl	$2, -60(%rbp)
+	movl	$1, -56(%rbp)
+	movl	$8, -48(%rbp)
+	movl	$8, -44(%rbp)
+	movl	$8, -40(%rbp)
+	movl	$4, -36(%rbp)
+	movl	$4, -32(%rbp)
+	movl	$2, -28(%rbp)
+	movl	$2, -24(%rbp)
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-144(%rbp,%rax,4), %eax
+	subl	%eax, %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %eax
+	addl	%edx, %eax
+	leal	-1(%rax), %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %eax
+	movl	%eax, %ecx
+	movl	%edx, %eax
+	movl	$0, %edx
+	divl	%ecx
+	movl	%eax, -172(%rbp)
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-112(%rbp,%rax,4), %eax
+	subl	%eax, %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-48(%rbp,%rax,4), %eax
+	addl	%edx, %eax
+	leal	-1(%rax), %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-48(%rbp,%rax,4), %eax
+	movl	%eax, %ecx
+	movl	%edx, %eax
+	movl	$0, %edx
+	divl	%ecx
+	movl	%eax, -168(%rbp)
+	cmpl	$0, -172(%rbp)
+	je	.L1486
+	cmpl	$0, -168(%rbp)
+	je	.L1486
+	movq	-200(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %eax
+	imull	-172(%rbp), %eax
+	imull	-220(%rbp), %eax
+	addl	$7, %eax
+	sarl	$3, %eax
+	addl	$1, %eax
+	imull	-168(%rbp), %eax
+	movl	%eax, -164(%rbp)
+	movl	-168(%rbp), %r9d
+	movl	-172(%rbp), %r8d
+	movl	-216(%rbp), %ecx
+	movl	-212(%rbp), %edx
+	movq	-208(%rbp), %rsi
+	movq	-200(%rbp), %rax
+	movl	-224(%rbp), %edi
+	pushq	%rdi
+	movl	-220(%rbp), %edi
+	pushq	%rdi
+	movq	%rax, %rdi
+	call	stbi__create_png_image_raw
+	addq	$16, %rsp
+	testl	%eax, %eax
+	jne	.L1487
+	movq	-152(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1483
+.L1487:
+	movl	$0, -184(%rbp)
+	jmp	.L1488
+.L1491:
+	movl	$0, -188(%rbp)
+	jmp	.L1489
+.L1490:
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-48(%rbp,%rax,4), %eax
+	imull	-184(%rbp), %eax
+	movl	%eax, %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-112(%rbp,%rax,4), %eax
+	addl	%edx, %eax
+	movl	%eax, -160(%rbp)
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-80(%rbp,%rax,4), %eax
+	imull	-188(%rbp), %eax
+	movl	%eax, %edx
+	movl	-192(%rbp), %eax
+	cltq
+	movl	-144(%rbp,%rax,4), %eax
+	addl	%edx, %eax
+	movl	%eax, -156(%rbp)
+	movl	-176(%rbp), %eax
+	cltq
+	movq	-200(%rbp), %rdx
+	movq	24(%rdx), %rcx
+	movl	-184(%rbp), %edx
+	movl	%edx, %esi
+	imull	-172(%rbp), %esi
+	movl	-188(%rbp), %edx
+	addl	%esi, %edx
+	imull	-176(%rbp), %edx
+	movslq	%edx, %rdx
+	leaq	(%rcx,%rdx), %rsi
+	movq	-200(%rbp), %rdx
+	movq	(%rdx), %rdx
+	movl	(%rdx), %ecx
+	movl	-160(%rbp), %edx
+	imull	%edx, %ecx
+	movl	-176(%rbp), %edx
+	imull	%ecx, %edx
+	movl	%edx, %ecx
+	movl	-156(%rbp), %edx
+	imull	-176(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	%rdx, %rcx
+	movq	-152(%rbp), %rdx
+	addq	%rdx, %rcx
+	movq	%rax, %rdx
+	movq	%rcx, %rdi
+	call	memcpy@PLT
+	addl	$1, -188(%rbp)
+.L1489:
+	movl	-188(%rbp), %eax
+	cmpl	-172(%rbp), %eax
+	jl	.L1490
+	addl	$1, -184(%rbp)
+.L1488:
+	movl	-184(%rbp), %eax
+	cmpl	-168(%rbp), %eax
+	jl	.L1491
+	movq	-200(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	-164(%rbp), %eax
+	addq	%rax, -208(%rbp)
+	movl	-164(%rbp), %eax
+	subl	%eax, -212(%rbp)
+.L1486:
+	addl	$1, -192(%rbp)
+.L1485:
+	cmpl	$6, -192(%rbp)
+	jle	.L1492
+	movq	-200(%rbp), %rax
+	movq	-152(%rbp), %rdx
+	movq	%rdx, 24(%rax)
+	movl	$1, %eax
+.L1483:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1493
+	call	__stack_chk_fail@PLT
+.L1493:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5017:
+	.size	stbi__create_png_image, .-stbi__create_png_image
+	.section	.rodata
+.LC60:
+	.string	"out_n == 2 || out_n == 4"
+	.text
+	.type	stbi__compute_transparency, @function
+stbi__compute_transparency:
+.LFB5018:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movl	%edx, -52(%rbp)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -20(%rbp)
+	movq	-40(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, -16(%rbp)
+	cmpl	$2, -52(%rbp)
+	je	.L1495
+	cmpl	$4, -52(%rbp)
+	je	.L1495
+	leaq	__PRETTY_FUNCTION__.7(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4914, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC60(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1495:
+	cmpl	$2, -52(%rbp)
+	jne	.L1496
+	movl	$0, -24(%rbp)
+	jmp	.L1497
+.L1500:
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	%al, %dl
+	jne	.L1498
+	movl	$0, %edx
+	jmp	.L1499
+.L1498:
+	movl	$-1, %edx
+.L1499:
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movb	%dl, (%rax)
+	addq	$2, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1497:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1500
+	jmp	.L1501
+.L1496:
+	movl	$0, -24(%rbp)
+	jmp	.L1502
+.L1504:
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	%al, %dl
+	jne	.L1503
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	cmpb	%al, %dl
+	jne	.L1503
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	cmpb	%al, %dl
+	jne	.L1503
+	movq	-16(%rbp), %rax
+	addq	$3, %rax
+	movb	$0, (%rax)
+.L1503:
+	addq	$4, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1502:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1504
+.L1501:
+	movl	$1, %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5018:
+	.size	stbi__compute_transparency, .-stbi__compute_transparency
+	.type	stbi__compute_transparency16, @function
+stbi__compute_transparency16:
+.LFB5019:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movl	%edx, -52(%rbp)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -20(%rbp)
+	movq	-40(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, -16(%rbp)
+	cmpl	$2, -52(%rbp)
+	je	.L1507
+	cmpl	$4, -52(%rbp)
+	je	.L1507
+	leaq	__PRETTY_FUNCTION__.6(%rip), %rax
+	movq	%rax, %rcx
+	movl	$4939, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC60(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1507:
+	cmpl	$2, -52(%rbp)
+	jne	.L1508
+	movl	$0, -24(%rbp)
+	jmp	.L1509
+.L1512:
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	movzwl	(%rax), %eax
+	cmpw	%ax, %dx
+	jne	.L1510
+	movl	$0, %edx
+	jmp	.L1511
+.L1510:
+	movl	$-1, %edx
+.L1511:
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movw	%dx, (%rax)
+	addq	$4, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1509:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1512
+	jmp	.L1513
+.L1508:
+	movl	$0, -24(%rbp)
+	jmp	.L1514
+.L1516:
+	movq	-16(%rbp), %rax
+	movzwl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	movzwl	(%rax), %eax
+	cmpw	%ax, %dx
+	jne	.L1515
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %eax
+	cmpw	%ax, %dx
+	jne	.L1515
+	movq	-16(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %edx
+	movq	-48(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %eax
+	cmpw	%ax, %dx
+	jne	.L1515
+	movq	-16(%rbp), %rax
+	addq	$6, %rax
+	movw	$0, (%rax)
+.L1515:
+	addq	$8, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1514:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1516
+.L1513:
+	movl	$1, %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5019:
+	.size	stbi__compute_transparency16, .-stbi__compute_transparency16
+	.type	stbi__expand_png_palette, @function
+stbi__expand_png_palette:
+.LFB5020:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$80, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movl	%edx, -68(%rbp)
+	movl	%ecx, -72(%rbp)
+	movq	-56(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %edx
+	movq	-56(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -36(%rbp)
+	movq	-56(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, -16(%rbp)
+	movl	-36(%rbp), %eax
+	movl	-72(%rbp), %ecx
+	movl	$0, %edx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__malloc_mad2
+	movq	%rax, -24(%rbp)
+	cmpq	$0, -24(%rbp)
+	jne	.L1519
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1520
+.L1519:
+	movq	-24(%rbp), %rax
+	movq	%rax, -8(%rbp)
+	cmpl	$3, -72(%rbp)
+	jne	.L1521
+	movl	$0, -40(%rbp)
+	jmp	.L1522
+.L1523:
+	movl	-40(%rbp), %edx
+	movq	-16(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$2, %eax
+	movl	%eax, -28(%rbp)
+	movl	-28(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	-24(%rbp), %rdx
+	addq	$1, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-28(%rbp), %eax
+	cltq
+	leaq	2(%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	-24(%rbp), %rdx
+	addq	$2, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	addq	$3, -24(%rbp)
+	addl	$1, -40(%rbp)
+.L1522:
+	movl	-40(%rbp), %eax
+	cmpl	-36(%rbp), %eax
+	jb	.L1523
+	jmp	.L1524
+.L1521:
+	movl	$0, -40(%rbp)
+	jmp	.L1525
+.L1526:
+	movl	-40(%rbp), %edx
+	movq	-16(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	sall	$2, %eax
+	movl	%eax, -32(%rbp)
+	movl	-32(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	movl	-32(%rbp), %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	-24(%rbp), %rdx
+	addq	$1, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-32(%rbp), %eax
+	cltq
+	leaq	2(%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	-24(%rbp), %rdx
+	addq	$2, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-32(%rbp), %eax
+	cltq
+	leaq	3(%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	-24(%rbp), %rdx
+	addq	$3, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	addq	$4, -24(%rbp)
+	addl	$1, -40(%rbp)
+.L1525:
+	movl	-40(%rbp), %eax
+	cmpl	-36(%rbp), %eax
+	jb	.L1526
+.L1524:
+	movq	-56(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-56(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	movq	%rdx, 24(%rax)
+	movl	$1, %eax
+.L1520:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5020:
+	.size	stbi__expand_png_palette, .-stbi__expand_png_palette
+	.local	stbi__unpremultiply_on_load_global
+	.comm	stbi__unpremultiply_on_load_global,4,4
+	.local	stbi__de_iphone_flag_global
+	.comm	stbi__de_iphone_flag_global,4,4
+	.globl	stbi_set_unpremultiply_on_load
+	.type	stbi_set_unpremultiply_on_load, @function
+stbi_set_unpremultiply_on_load:
+.LFB5021:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movl	%eax, stbi__unpremultiply_on_load_global(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5021:
+	.size	stbi_set_unpremultiply_on_load, .-stbi_set_unpremultiply_on_load
+	.globl	stbi_convert_iphone_png_to_rgb
+	.type	stbi_convert_iphone_png_to_rgb, @function
+stbi_convert_iphone_png_to_rgb:
+.LFB5022:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movl	%eax, stbi__de_iphone_flag_global(%rip)
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5022:
+	.size	stbi_convert_iphone_png_to_rgb, .-stbi_convert_iphone_png_to_rgb
+	.section	.tbss
+	.align 4
+	.type	stbi__unpremultiply_on_load_local, @object
+	.size	stbi__unpremultiply_on_load_local, 4
+stbi__unpremultiply_on_load_local:
+	.zero	4
+	.align 4
+	.type	stbi__unpremultiply_on_load_set, @object
+	.size	stbi__unpremultiply_on_load_set, 4
+stbi__unpremultiply_on_load_set:
+	.zero	4
+	.align 4
+	.type	stbi__de_iphone_flag_local, @object
+	.size	stbi__de_iphone_flag_local, 4
+stbi__de_iphone_flag_local:
+	.zero	4
+	.align 4
+	.type	stbi__de_iphone_flag_set, @object
+	.size	stbi__de_iphone_flag_set, 4
+stbi__de_iphone_flag_set:
+	.zero	4
+	.text
+	.globl	stbi_set_unpremultiply_on_load_thread
+	.type	stbi_set_unpremultiply_on_load_thread, @function
+stbi_set_unpremultiply_on_load_thread:
+.LFB5023:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movl	%eax, %fs:stbi__unpremultiply_on_load_local@tpoff
+	movl	$1, %fs:stbi__unpremultiply_on_load_set@tpoff
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5023:
+	.size	stbi_set_unpremultiply_on_load_thread, .-stbi_set_unpremultiply_on_load_thread
+	.globl	stbi_convert_iphone_png_to_rgb_thread
+	.type	stbi_convert_iphone_png_to_rgb_thread, @function
+stbi_convert_iphone_png_to_rgb_thread:
+.LFB5024:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movl	%eax, %fs:stbi__de_iphone_flag_local@tpoff
+	movl	$1, %fs:stbi__de_iphone_flag_set@tpoff
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5024:
+	.size	stbi_convert_iphone_png_to_rgb_thread, .-stbi_convert_iphone_png_to_rgb_thread
+	.section	.rodata
+.LC61:
+	.string	"s->img_out_n == 4"
+	.text
+	.type	stbi__de_iphone, @function
+stbi__de_iphone:
+.LFB5025:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -20(%rbp)
+	movq	-40(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, -16(%rbp)
+	movq	-8(%rbp), %rax
+	movl	12(%rax), %eax
+	cmpl	$3, %eax
+	jne	.L1532
+	movl	$0, -24(%rbp)
+	jmp	.L1533
+.L1534:
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, -25(%rbp)
+	movq	-16(%rbp), %rax
+	movzbl	2(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movzbl	-25(%rbp), %eax
+	movb	%al, (%rdx)
+	addq	$3, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1533:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1534
+	jmp	.L1546
+.L1532:
+	movq	-8(%rbp), %rax
+	movl	12(%rax), %eax
+	cmpl	$4, %eax
+	je	.L1536
+	leaq	__PRETTY_FUNCTION__.5(%rip), %rax
+	movq	%rax, %rcx
+	movl	$5047, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC61(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1536:
+	movl	%fs:stbi__unpremultiply_on_load_set@tpoff, %eax
+	testl	%eax, %eax
+	je	.L1537
+	movl	%fs:stbi__unpremultiply_on_load_local@tpoff, %eax
+	testl	%eax, %eax
+	setne	%al
+	jmp	.L1538
+.L1537:
+	movl	stbi__unpremultiply_on_load_global(%rip), %eax
+	testl	%eax, %eax
+	setne	%al
+.L1538:
+	testb	%al, %al
+	je	.L1539
+	movl	$0, -24(%rbp)
+	jmp	.L1540
+.L1543:
+	movq	-16(%rbp), %rax
+	movzbl	3(%rax), %eax
+	movb	%al, -28(%rbp)
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, -27(%rbp)
+	cmpb	$0, -28(%rbp)
+	je	.L1541
+	movzbl	-28(%rbp), %eax
+	shrb	%al
+	movb	%al, -26(%rbp)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	sall	$8, %eax
+	subl	%edx, %eax
+	movl	%eax, %ecx
+	movzbl	-26(%rbp), %eax
+	addl	%ecx, %eax
+	movzbl	-28(%rbp), %esi
+	cltd
+	idivl	%esi
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movl	%edx, %eax
+	sall	$8, %eax
+	subl	%edx, %eax
+	movl	%eax, %ecx
+	movzbl	-26(%rbp), %eax
+	addl	%ecx, %eax
+	movzbl	-28(%rbp), %edi
+	cltd
+	idivl	%edi
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movb	%dl, (%rax)
+	movzbl	-27(%rbp), %edx
+	movl	%edx, %eax
+	sall	$8, %eax
+	subl	%edx, %eax
+	movl	%eax, %ecx
+	movzbl	-26(%rbp), %eax
+	addl	%ecx, %eax
+	movzbl	-28(%rbp), %esi
+	cltd
+	idivl	%esi
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movb	%dl, (%rax)
+	jmp	.L1542
+.L1541:
+	movq	-16(%rbp), %rax
+	movzbl	2(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movzbl	-27(%rbp), %eax
+	movb	%al, (%rdx)
+.L1542:
+	addq	$4, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1540:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1543
+	jmp	.L1546
+.L1539:
+	movl	$0, -24(%rbp)
+	jmp	.L1544
+.L1545:
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, -29(%rbp)
+	movq	-16(%rbp), %rax
+	movzbl	2(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movzbl	-29(%rbp), %eax
+	movb	%al, (%rdx)
+	addq	$4, -16(%rbp)
+	addl	$1, -24(%rbp)
+.L1544:
+	movl	-24(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jb	.L1545
+.L1546:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5025:
+	.size	stbi__de_iphone, .-stbi__de_iphone
+	.section	.rodata
+.LC62:
+	.string	"multiple IHDR"
+.LC63:
+	.string	"bad IHDR len"
+.LC64:
+	.string	"1/2/4/8/16-bit only"
+.LC65:
+	.string	"bad ctype"
+.LC66:
+	.string	"bad comp method"
+.LC67:
+	.string	"bad filter method"
+.LC68:
+	.string	"bad interlace method"
+.LC69:
+	.string	"0-pixel image"
+.LC70:
+	.string	"first not IHDR"
+.LC71:
+	.string	"invalid PLTE"
+.LC72:
+	.string	"tRNS after IDAT"
+.LC73:
+	.string	"tRNS before PLTE"
+.LC74:
+	.string	"bad tRNS len"
+.LC75:
+	.string	"tRNS with alpha"
+.LC76:
+	.string	"no PLTE"
+.LC77:
+	.string	"IDAT size limit"
+.LC78:
+	.string	"outofdata"
+.LC79:
+	.string	"no IDAT"
+	.text
+	.type	stbi__parse_png_file, @function
+stbi__parse_png_file:
+.LFB5026:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$1176, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -1176(%rbp)
+	movl	%esi, -1180(%rbp)
+	movl	%edx, -1184(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movb	$0, -1154(%rbp)
+	movb	$0, -1153(%rbp)
+	movw	$0, -1059(%rbp)
+	movb	$0, -1057(%rbp)
+	movl	$0, -1148(%rbp)
+	movl	$0, -1144(%rbp)
+	movl	$0, -1136(%rbp)
+	movl	$1, -1132(%rbp)
+	movl	$0, -1124(%rbp)
+	movl	$0, -1120(%rbp)
+	movl	$0, -1116(%rbp)
+	movq	-1176(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, -1096(%rbp)
+	movq	-1176(%rbp), %rax
+	movq	$0, 16(%rax)
+	movq	-1176(%rbp), %rax
+	movq	$0, 8(%rax)
+	movq	-1176(%rbp), %rax
+	movq	$0, 24(%rax)
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__check_png_header
+	testl	%eax, %eax
+	jne	.L1548
+	movl	$0, %eax
+	jmp	.L1633
+.L1548:
+	cmpl	$1, -1180(%rbp)
+	jne	.L1550
+	movl	$1, %eax
+	jmp	.L1633
+.L1550:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get_chunk_header
+	movq	%rax, -1076(%rbp)
+	movl	-1072(%rbp), %eax
+	cmpl	$1951551059, %eax
+	je	.L1551
+	cmpl	$1951551059, %eax
+	ja	.L1552
+	cmpl	$1347179589, %eax
+	je	.L1553
+	cmpl	$1347179589, %eax
+	ja	.L1552
+	cmpl	$1229472850, %eax
+	je	.L1554
+	cmpl	$1229472850, %eax
+	ja	.L1552
+	cmpl	$1229278788, %eax
+	je	.L1555
+	cmpl	$1229278788, %eax
+	ja	.L1552
+	cmpl	$1130840649, %eax
+	je	.L1556
+	cmpl	$1229209940, %eax
+	je	.L1557
+	jmp	.L1552
+.L1556:
+	movl	$1, -1116(%rbp)
+	movl	-1076(%rbp), %eax
+	movl	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	jmp	.L1558
+.L1554:
+	cmpl	$0, -1132(%rbp)
+	jne	.L1559
+	leaq	.LC62(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1559:
+	movl	$0, -1132(%rbp)
+	movl	-1076(%rbp), %eax
+	cmpl	$13, %eax
+	je	.L1561
+	leaq	.LC63(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1561:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movq	-1096(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movq	-1096(%rbp), %rdx
+	movl	%eax, 4(%rdx)
+	movq	-1096(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L1562
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1562:
+	movq	-1096(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L1563
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1563:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-1176(%rbp), %rax
+	movl	%edx, 32(%rax)
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$1, %eax
+	je	.L1564
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$2, %eax
+	je	.L1564
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$4, %eax
+	je	.L1564
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$8, %eax
+	je	.L1564
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$16, %eax
+	je	.L1564
+	leaq	.LC64(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1564:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1120(%rbp)
+	cmpl	$6, -1120(%rbp)
+	jle	.L1565
+	leaq	.LC65(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1565:
+	cmpl	$3, -1120(%rbp)
+	jne	.L1566
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1566
+	leaq	.LC65(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1566:
+	cmpl	$3, -1120(%rbp)
+	jne	.L1567
+	movb	$3, -1154(%rbp)
+	jmp	.L1568
+.L1567:
+	movl	-1120(%rbp), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	je	.L1568
+	leaq	.LC65(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1568:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1112(%rbp)
+	cmpl	$0, -1112(%rbp)
+	je	.L1569
+	leaq	.LC66(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1569:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1108(%rbp)
+	cmpl	$0, -1108(%rbp)
+	je	.L1570
+	leaq	.LC67(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1570:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1124(%rbp)
+	cmpl	$1, -1124(%rbp)
+	jle	.L1571
+	leaq	.LC68(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1571:
+	movq	-1096(%rbp), %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	je	.L1572
+	movq	-1096(%rbp), %rax
+	movl	4(%rax), %eax
+	testl	%eax, %eax
+	jne	.L1573
+.L1572:
+	leaq	.LC69(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1573:
+	cmpb	$0, -1154(%rbp)
+	jne	.L1574
+	movl	-1120(%rbp), %eax
+	andl	$2, %eax
+	testl	%eax, %eax
+	je	.L1575
+	movl	$3, %edx
+	jmp	.L1576
+.L1575:
+	movl	$1, %edx
+.L1576:
+	movl	-1120(%rbp), %eax
+	sarl	$2, %eax
+	andl	$1, %eax
+	addl	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 8(%rax)
+	movq	-1096(%rbp), %rax
+	movl	(%rax), %ebx
+	movl	$1073741824, %eax
+	movl	$0, %edx
+	divl	%ebx
+	movl	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	%eax, %ecx
+	movl	%edx, %eax
+	movl	$0, %edx
+	divl	%ecx
+	movl	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, %edx
+	jnb	.L1635
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1574:
+	movq	-1096(%rbp), %rax
+	movl	$1, 8(%rax)
+	movq	-1096(%rbp), %rax
+	movl	(%rax), %ebx
+	movl	$268435456, %eax
+	movl	$0, %edx
+	divl	%ebx
+	movl	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, %edx
+	jnb	.L1635
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1553:
+	cmpl	$0, -1132(%rbp)
+	je	.L1578
+	leaq	.LC70(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1578:
+	movl	-1076(%rbp), %eax
+	cmpl	$768, %eax
+	jbe	.L1579
+	leaq	.LC71(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1579:
+	movl	-1076(%rbp), %eax
+	movl	%eax, %edx
+	movl	$2863311531, %eax
+	imulq	%rdx, %rax
+	shrq	$32, %rax
+	shrl	%eax
+	movl	%eax, -1136(%rbp)
+	movl	-1136(%rbp), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%eax, %edx
+	movl	-1076(%rbp), %eax
+	cmpl	%eax, %edx
+	je	.L1580
+	leaq	.LC71(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1580:
+	movl	$0, -1140(%rbp)
+	jmp	.L1581
+.L1582:
+	movl	-1140(%rbp), %eax
+	leal	0(,%rax,4), %ebx
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%ebx, %edx
+	movb	%al, -1056(%rbp,%rdx)
+	movl	-1140(%rbp), %eax
+	sall	$2, %eax
+	leal	1(%rax), %ebx
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%ebx, %edx
+	movb	%al, -1056(%rbp,%rdx)
+	movl	-1140(%rbp), %eax
+	sall	$2, %eax
+	leal	2(%rax), %ebx
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%ebx, %edx
+	movb	%al, -1056(%rbp,%rdx)
+	movl	-1140(%rbp), %eax
+	sall	$2, %eax
+	addl	$3, %eax
+	movl	%eax, %eax
+	movb	$-1, -1056(%rbp,%rax)
+	addl	$1, -1140(%rbp)
+.L1581:
+	movl	-1140(%rbp), %eax
+	cmpl	-1136(%rbp), %eax
+	jb	.L1582
+	jmp	.L1558
+.L1551:
+	cmpl	$0, -1132(%rbp)
+	je	.L1583
+	leaq	.LC70(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1583:
+	movq	-1176(%rbp), %rax
+	movq	8(%rax), %rax
+	testq	%rax, %rax
+	je	.L1584
+	leaq	.LC72(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1584:
+	cmpb	$0, -1154(%rbp)
+	je	.L1585
+	cmpl	$2, -1180(%rbp)
+	jne	.L1586
+	movq	-1096(%rbp), %rax
+	movl	$4, 8(%rax)
+	movl	$1, %eax
+	jmp	.L1633
+.L1586:
+	cmpl	$0, -1136(%rbp)
+	jne	.L1587
+	leaq	.LC73(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1587:
+	movl	-1076(%rbp), %eax
+	cmpl	%eax, -1136(%rbp)
+	jnb	.L1588
+	leaq	.LC74(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1588:
+	movb	$4, -1154(%rbp)
+	movl	$0, -1140(%rbp)
+	jmp	.L1589
+.L1590:
+	movl	-1140(%rbp), %eax
+	sall	$2, %eax
+	leal	3(%rax), %ebx
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%ebx, %edx
+	movb	%al, -1056(%rbp,%rdx)
+	addl	$1, -1140(%rbp)
+.L1589:
+	movl	-1076(%rbp), %eax
+	cmpl	%eax, -1140(%rbp)
+	jb	.L1590
+	jmp	.L1636
+.L1585:
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	jne	.L1592
+	leaq	.LC75(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1592:
+	movl	-1076(%rbp), %edx
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	addl	%eax, %eax
+	cmpl	%eax, %edx
+	je	.L1593
+	leaq	.LC74(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1593:
+	movb	$1, -1153(%rbp)
+	cmpl	$2, -1180(%rbp)
+	jne	.L1594
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	leal	1(%rax), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 8(%rax)
+	movl	$1, %eax
+	jmp	.L1633
+.L1594:
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1595
+	movl	$0, -1128(%rbp)
+	jmp	.L1596
+.L1597:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, %edx
+	movl	-1128(%rbp), %eax
+	cltq
+	movw	%dx, -1066(%rbp,%rax,2)
+	addl	$1, -1128(%rbp)
+.L1596:
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -1128(%rbp)
+	jge	.L1636
+	cmpl	$2, -1128(%rbp)
+	jle	.L1597
+	jmp	.L1636
+.L1595:
+	movl	$0, -1128(%rbp)
+	jmp	.L1598
+.L1599:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, %ecx
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cltq
+	leaq	stbi__depth_scale_table(%rip), %rdx
+	movzbl	(%rax,%rdx), %edx
+	movl	%ecx, %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movl	-1128(%rbp), %eax
+	cltq
+	movb	%dl, -1059(%rbp,%rax)
+	addl	$1, -1128(%rbp)
+.L1598:
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -1128(%rbp)
+	jge	.L1636
+	cmpl	$2, -1128(%rbp)
+	jle	.L1599
+	jmp	.L1636
+.L1557:
+	cmpl	$0, -1132(%rbp)
+	je	.L1600
+	leaq	.LC70(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1600:
+	cmpb	$0, -1154(%rbp)
+	je	.L1601
+	cmpl	$0, -1136(%rbp)
+	jne	.L1601
+	leaq	.LC76(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1601:
+	cmpl	$2, -1180(%rbp)
+	jne	.L1602
+	cmpb	$0, -1154(%rbp)
+	je	.L1603
+	movzbl	-1154(%rbp), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 8(%rax)
+.L1603:
+	movl	$1, %eax
+	jmp	.L1633
+.L1602:
+	movl	-1076(%rbp), %eax
+	cmpl	$1073741824, %eax
+	jbe	.L1604
+	leaq	.LC77(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1604:
+	movl	-1076(%rbp), %edx
+	movl	-1148(%rbp), %eax
+	addl	%edx, %eax
+	movl	%eax, %edx
+	movl	-1148(%rbp), %eax
+	cmpl	%eax, %edx
+	jge	.L1605
+	movl	$0, %eax
+	jmp	.L1633
+.L1605:
+	movl	-1076(%rbp), %edx
+	movl	-1148(%rbp), %eax
+	addl	%edx, %eax
+	cmpl	%eax, -1144(%rbp)
+	jnb	.L1606
+	movl	-1144(%rbp), %eax
+	movl	%eax, -1100(%rbp)
+	cmpl	$0, -1144(%rbp)
+	jne	.L1608
+	movl	-1076(%rbp), %eax
+	movl	$4096, %edx
+	cmpl	%edx, %eax
+	cmovb	%edx, %eax
+	movl	%eax, -1144(%rbp)
+	jmp	.L1608
+.L1609:
+	sall	-1144(%rbp)
+.L1608:
+	movl	-1076(%rbp), %edx
+	movl	-1148(%rbp), %eax
+	addl	%edx, %eax
+	cmpl	%eax, -1144(%rbp)
+	jb	.L1609
+	movl	-1144(%rbp), %edx
+	movq	-1176(%rbp), %rax
+	movq	8(%rax), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	realloc@PLT
+	movq	%rax, -1088(%rbp)
+	cmpq	$0, -1088(%rbp)
+	jne	.L1610
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1610:
+	movq	-1176(%rbp), %rax
+	movq	-1088(%rbp), %rdx
+	movq	%rdx, 8(%rax)
+.L1606:
+	movl	-1076(%rbp), %eax
+	movl	%eax, %esi
+	movq	-1176(%rbp), %rax
+	movq	8(%rax), %rdx
+	movl	-1148(%rbp), %eax
+	leaq	(%rdx,%rax), %rcx
+	movq	-1096(%rbp), %rax
+	movl	%esi, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__getn
+	testl	%eax, %eax
+	jne	.L1611
+	leaq	.LC78(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1611:
+	movl	-1076(%rbp), %eax
+	addl	%eax, -1148(%rbp)
+	jmp	.L1558
+.L1555:
+	cmpl	$0, -1132(%rbp)
+	je	.L1612
+	leaq	.LC70(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1612:
+	cmpl	$0, -1180(%rbp)
+	je	.L1614
+	movl	$1, %eax
+	jmp	.L1633
+.L1614:
+	movq	-1176(%rbp), %rax
+	movq	8(%rax), %rax
+	testq	%rax, %rax
+	jne	.L1615
+	leaq	.LC79(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1615:
+	movq	-1096(%rbp), %rax
+	movl	(%rax), %eax
+	movq	-1176(%rbp), %rdx
+	movl	32(%rdx), %edx
+	imull	%edx, %eax
+	addl	$7, %eax
+	shrl	$3, %eax
+	movl	%eax, -1104(%rbp)
+	movq	-1096(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	-1104(%rbp), %eax
+	movq	-1096(%rbp), %rdx
+	movl	8(%rdx), %edx
+	imull	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	4(%rax), %eax
+	addl	%edx, %eax
+	movl	%eax, -1152(%rbp)
+	cmpl	$0, -1116(%rbp)
+	sete	%al
+	movzbl	%al, %ecx
+	movl	-1152(%rbp), %eax
+	movl	%eax, %edi
+	movl	-1148(%rbp), %esi
+	movq	-1176(%rbp), %rax
+	movq	8(%rax), %rax
+	leaq	-1152(%rbp), %rdx
+	movl	%ecx, %r8d
+	movq	%rdx, %rcx
+	movl	%edi, %edx
+	movq	%rax, %rdi
+	call	stbi_zlib_decode_malloc_guesssize_headerflag
+	movq	-1176(%rbp), %rdx
+	movq	%rax, 16(%rdx)
+	movq	-1176(%rbp), %rax
+	movq	16(%rax), %rax
+	testq	%rax, %rax
+	jne	.L1616
+	movl	$0, %eax
+	jmp	.L1633
+.L1616:
+	movq	-1176(%rbp), %rax
+	movq	8(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-1176(%rbp), %rax
+	movq	$0, 8(%rax)
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	addl	$1, %eax
+	cmpl	%eax, -1184(%rbp)
+	jne	.L1617
+	cmpl	$3, -1184(%rbp)
+	je	.L1617
+	cmpb	$0, -1154(%rbp)
+	je	.L1618
+.L1617:
+	cmpb	$0, -1153(%rbp)
+	je	.L1619
+.L1618:
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	leal	1(%rax), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 12(%rax)
+	jmp	.L1620
+.L1619:
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 12(%rax)
+.L1620:
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %r8d
+	movq	-1096(%rbp), %rax
+	movl	12(%rax), %ecx
+	movl	-1152(%rbp), %edx
+	movq	-1176(%rbp), %rax
+	movq	16(%rax), %rsi
+	movl	-1120(%rbp), %r9d
+	movq	-1176(%rbp), %rax
+	subq	$8, %rsp
+	movl	-1124(%rbp), %edi
+	pushq	%rdi
+	movq	%rax, %rdi
+	call	stbi__create_png_image
+	addq	$16, %rsp
+	testl	%eax, %eax
+	jne	.L1621
+	movl	$0, %eax
+	jmp	.L1633
+.L1621:
+	cmpb	$0, -1153(%rbp)
+	je	.L1622
+	movq	-1176(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1623
+	movq	-1096(%rbp), %rax
+	movl	12(%rax), %edx
+	leaq	-1066(%rbp), %rcx
+	movq	-1176(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__compute_transparency16
+	testl	%eax, %eax
+	jne	.L1622
+	movl	$0, %eax
+	jmp	.L1633
+.L1623:
+	movq	-1096(%rbp), %rax
+	movl	12(%rax), %edx
+	leaq	-1059(%rbp), %rcx
+	movq	-1176(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__compute_transparency
+	testl	%eax, %eax
+	jne	.L1622
+	movl	$0, %eax
+	jmp	.L1633
+.L1622:
+	cmpl	$0, -1116(%rbp)
+	je	.L1624
+	movl	%fs:stbi__de_iphone_flag_set@tpoff, %eax
+	testl	%eax, %eax
+	je	.L1625
+	movl	%fs:stbi__de_iphone_flag_local@tpoff, %eax
+	testl	%eax, %eax
+	setne	%al
+	jmp	.L1626
+.L1625:
+	movl	stbi__de_iphone_flag_global(%rip), %eax
+	testl	%eax, %eax
+	setne	%al
+.L1626:
+	testb	%al, %al
+	je	.L1624
+	movq	-1096(%rbp), %rax
+	movl	12(%rax), %eax
+	cmpl	$2, %eax
+	jle	.L1624
+	movq	-1176(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__de_iphone
+.L1624:
+	cmpb	$0, -1154(%rbp)
+	je	.L1627
+	movzbl	-1154(%rbp), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 8(%rax)
+	movzbl	-1154(%rbp), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 12(%rax)
+	cmpl	$2, -1184(%rbp)
+	jle	.L1628
+	movq	-1096(%rbp), %rax
+	movl	-1184(%rbp), %edx
+	movl	%edx, 12(%rax)
+.L1628:
+	movq	-1096(%rbp), %rax
+	movl	12(%rax), %ecx
+	movl	-1136(%rbp), %edx
+	leaq	-1056(%rbp), %rsi
+	movq	-1176(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__expand_png_palette
+	testl	%eax, %eax
+	jne	.L1629
+	movl	$0, %eax
+	jmp	.L1633
+.L1627:
+	cmpb	$0, -1153(%rbp)
+	je	.L1629
+	movq	-1096(%rbp), %rax
+	movl	8(%rax), %eax
+	leal	1(%rax), %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, 8(%rax)
+.L1629:
+	movq	-1176(%rbp), %rax
+	movq	16(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-1176(%rbp), %rax
+	movq	$0, 16(%rax)
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	$1, %eax
+	jmp	.L1633
+.L1552:
+	cmpl	$0, -1132(%rbp)
+	je	.L1630
+	leaq	.LC70(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1630:
+	movl	-1072(%rbp), %eax
+	andl	$536870912, %eax
+	testl	%eax, %eax
+	jne	.L1631
+	movl	-1072(%rbp), %eax
+	shrl	$24, %eax
+	movb	%al, invalid_chunk.4(%rip)
+	movl	-1072(%rbp), %eax
+	shrl	$16, %eax
+	movb	%al, 1+invalid_chunk.4(%rip)
+	movl	-1072(%rbp), %eax
+	shrl	$8, %eax
+	movb	%al, 2+invalid_chunk.4(%rip)
+	movl	-1072(%rbp), %eax
+	movb	%al, 3+invalid_chunk.4(%rip)
+	leaq	invalid_chunk.4(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L1633
+.L1631:
+	movl	-1076(%rbp), %eax
+	movl	%eax, %edx
+	movq	-1096(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	jmp	.L1558
+.L1635:
+	nop
+	jmp	.L1558
+.L1636:
+	nop
+.L1558:
+	movq	-1096(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	jmp	.L1550
+.L1633:
+	movq	-24(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1634
+	call	__stack_chk_fail@PLT
+.L1634:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5026:
+	.size	stbi__parse_png_file, .-stbi__parse_png_file
+	.type	stbi__do_png, @function
+stbi__do_png:
+.LFB5027:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	%r9, -64(%rbp)
+	movq	$0, -8(%rbp)
+	cmpl	$0, -52(%rbp)
+	js	.L1638
+	cmpl	$4, -52(%rbp)
+	jle	.L1639
+.L1638:
+	movl	$0, %eax
+	jmp	.L1640
+.L1639:
+	movl	-52(%rbp), %edx
+	movq	-24(%rbp), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	stbi__parse_png_file
+	testl	%eax, %eax
+	je	.L1641
+	movq	-24(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$8, %eax
+	jg	.L1642
+	movq	-64(%rbp), %rax
+	movl	$8, (%rax)
+	jmp	.L1643
+.L1642:
+	movq	-24(%rbp), %rax
+	movl	32(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1644
+	movq	-64(%rbp), %rax
+	movl	$16, (%rax)
+	jmp	.L1643
+.L1644:
+	movl	$0, %eax
+	jmp	.L1640
+.L1643:
+	movq	-24(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, -8(%rbp)
+	movq	-24(%rbp), %rax
+	movq	$0, 24(%rax)
+	cmpl	$0, -52(%rbp)
+	je	.L1645
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	12(%rax), %eax
+	cmpl	%eax, -52(%rbp)
+	je	.L1645
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$8, %eax
+	jne	.L1646
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %edi
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %ecx
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	12(%rax), %esi
+	movl	-52(%rbp), %edx
+	movq	-8(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -8(%rbp)
+	jmp	.L1647
+.L1646:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %edi
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %ecx
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	12(%rax), %esi
+	movl	-52(%rbp), %edx
+	movq	-8(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__convert_format16
+	movq	%rax, -8(%rbp)
+.L1647:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	-52(%rbp), %edx
+	movl	%edx, 12(%rax)
+	cmpq	$0, -8(%rbp)
+	jne	.L1645
+	movq	-8(%rbp), %rax
+	jmp	.L1640
+.L1645:
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, (%rax)
+	cmpq	$0, -48(%rbp)
+	je	.L1641
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %edx
+	movq	-48(%rbp), %rax
+	movl	%edx, (%rax)
+.L1641:
+	movq	-24(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rax
+	movq	$0, 24(%rax)
+	movq	-24(%rbp), %rax
+	movq	16(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rax
+	movq	$0, 16(%rax)
+	movq	-24(%rbp), %rax
+	movq	8(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rax
+	movq	$0, 8(%rax)
+	movq	-8(%rbp), %rax
+.L1640:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5027:
+	.size	stbi__do_png, .-stbi__do_png
+	.type	stbi__png_load, @function
+stbi__png_load:
+.LFB5028:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%rcx, -80(%rbp)
+	movl	%r8d, -84(%rbp)
+	movq	%r9, -96(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-56(%rbp), %rax
+	movq	%rax, -48(%rbp)
+	movq	-96(%rbp), %r8
+	movl	-84(%rbp), %edi
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	leaq	-48(%rbp), %rax
+	movq	%r8, %r9
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__do_png
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1650
+	call	__stack_chk_fail@PLT
+.L1650:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5028:
+	.size	stbi__png_load, .-stbi__png_load
+	.type	stbi__png_test, @function
+stbi__png_test:
+.LFB5029:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__check_png_header
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5029:
+	.size	stbi__png_test, .-stbi__png_test
+	.type	stbi__png_info_raw, @function
+stbi__png_info_raw:
+.LFB5030:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	%rcx, -32(%rbp)
+	movq	-8(%rbp), %rax
+	movl	$0, %edx
+	movl	$2, %esi
+	movq	%rax, %rdi
+	call	stbi__parse_png_file
+	testl	%eax, %eax
+	jne	.L1654
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1655
+.L1654:
+	cmpq	$0, -16(%rbp)
+	je	.L1656
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movl	%edx, (%rax)
+.L1656:
+	cmpq	$0, -24(%rbp)
+	je	.L1657
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, (%rax)
+.L1657:
+	cmpq	$0, -32(%rbp)
+	je	.L1658
+	movq	-8(%rbp), %rax
+	movq	(%rax), %rax
+	movl	8(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, (%rax)
+.L1658:
+	movl	$1, %eax
+.L1655:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5030:
+	.size	stbi__png_info_raw, .-stbi__png_info_raw
+	.type	stbi__png_info, @function
+stbi__png_info:
+.LFB5031:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$80, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%rcx, -80(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-56(%rbp), %rax
+	movq	%rax, -48(%rbp)
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	movq	-64(%rbp), %rsi
+	leaq	-48(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__png_info_raw
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1661
+	call	__stack_chk_fail@PLT
+.L1661:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5031:
+	.size	stbi__png_info, .-stbi__png_info
+	.type	stbi__png_is16, @function
+stbi__png_is16:
+.LFB5032:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-56(%rbp), %rax
+	movq	%rax, -48(%rbp)
+	leaq	-48(%rbp), %rax
+	movl	$0, %ecx
+	movl	$0, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	stbi__png_info_raw
+	testl	%eax, %eax
+	jne	.L1663
+	movl	$0, %eax
+	jmp	.L1666
+.L1663:
+	movl	-16(%rbp), %eax
+	cmpl	$16, %eax
+	je	.L1665
+	movq	-48(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1666
+.L1665:
+	movl	$1, %eax
+.L1666:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1667
+	call	__stack_chk_fail@PLT
+.L1667:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5032:
+	.size	stbi__png_is16, .-stbi__png_is16
+	.type	stbi__bmp_test_raw, @function
+stbi__bmp_test_raw:
+.LFB5033:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$66, %al
+	je	.L1669
+	movl	$0, %eax
+	jmp	.L1670
+.L1669:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$77, %al
+	je	.L1671
+	movl	$0, %eax
+	jmp	.L1670
+.L1671:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movl	%eax, -8(%rbp)
+	cmpl	$12, -8(%rbp)
+	je	.L1672
+	cmpl	$40, -8(%rbp)
+	je	.L1672
+	cmpl	$56, -8(%rbp)
+	je	.L1672
+	cmpl	$108, -8(%rbp)
+	je	.L1672
+	cmpl	$124, -8(%rbp)
+	jne	.L1673
+.L1672:
+	movl	$1, %eax
+	jmp	.L1674
+.L1673:
+	movl	$0, %eax
+.L1674:
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+.L1670:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5033:
+	.size	stbi__bmp_test_raw, .-stbi__bmp_test_raw
+	.type	stbi__bmp_test, @function
+stbi__bmp_test:
+.LFB5034:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__bmp_test_raw
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5034:
+	.size	stbi__bmp_test, .-stbi__bmp_test
+	.type	stbi__high_bit, @function
+stbi__high_bit:
+.LFB5035:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -20(%rbp)
+	movl	$0, -4(%rbp)
+	cmpl	$0, -20(%rbp)
+	jne	.L1678
+	movl	$-1, %eax
+	jmp	.L1679
+.L1678:
+	cmpl	$65535, -20(%rbp)
+	jbe	.L1680
+	addl	$16, -4(%rbp)
+	shrl	$16, -20(%rbp)
+.L1680:
+	cmpl	$255, -20(%rbp)
+	jbe	.L1681
+	addl	$8, -4(%rbp)
+	shrl	$8, -20(%rbp)
+.L1681:
+	cmpl	$15, -20(%rbp)
+	jbe	.L1682
+	addl	$4, -4(%rbp)
+	shrl	$4, -20(%rbp)
+.L1682:
+	cmpl	$3, -20(%rbp)
+	jbe	.L1683
+	addl	$2, -4(%rbp)
+	shrl	$2, -20(%rbp)
+.L1683:
+	cmpl	$1, -20(%rbp)
+	jbe	.L1684
+	addl	$1, -4(%rbp)
+.L1684:
+	movl	-4(%rbp), %eax
+.L1679:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5035:
+	.size	stbi__high_bit, .-stbi__high_bit
+	.type	stbi__bitcount, @function
+stbi__bitcount:
+.LFB5036:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	-4(%rbp), %eax
+	andl	$1431655765, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	shrl	%eax
+	andl	$1431655765, %eax
+	addl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	andl	$858993459, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	shrl	$2, %eax
+	andl	$858993459, %eax
+	addl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	shrl	$4, %eax
+	movl	%eax, %edx
+	movl	-4(%rbp), %eax
+	addl	%edx, %eax
+	andl	$252645135, %eax
+	movl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	shrl	$8, %eax
+	addl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	shrl	$16, %eax
+	addl	%eax, -4(%rbp)
+	movl	-4(%rbp), %eax
+	movzbl	%al, %eax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5036:
+	.size	stbi__bitcount, .-stbi__bitcount
+	.section	.rodata
+.LC80:
+	.string	"v < 256"
+.LC81:
+	.string	"bits >= 0 && bits <= 8"
+	.text
+	.type	stbi__shiftsigned, @function
+stbi__shiftsigned:
+.LFB5037:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movl	%edx, -12(%rbp)
+	cmpl	$0, -8(%rbp)
+	jns	.L1688
+	movl	-8(%rbp), %eax
+	negl	%eax
+	movl	%eax, %ecx
+	sall	%cl, -4(%rbp)
+	jmp	.L1689
+.L1688:
+	movl	-8(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, -4(%rbp)
+.L1689:
+	cmpl	$255, -4(%rbp)
+	jbe	.L1690
+	leaq	__PRETTY_FUNCTION__.3(%rip), %rax
+	movq	%rax, %rcx
+	movl	$5409, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC80(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1690:
+	movl	$8, %eax
+	subl	-12(%rbp), %eax
+	movl	%eax, %ecx
+	shrl	%cl, -4(%rbp)
+	cmpl	$0, -12(%rbp)
+	js	.L1691
+	cmpl	$8, -12(%rbp)
+	jle	.L1694
+.L1691:
+	leaq	__PRETTY_FUNCTION__.3(%rip), %rax
+	movq	%rax, %rcx
+	movl	$5411, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC81(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1694:
+	movl	-12(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	mul_table.2(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	imull	-4(%rbp), %eax
+	movl	%eax, %esi
+	movl	-12(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	leaq	shift_table.1(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	movl	%eax, %ecx
+	sarl	%cl, %esi
+	movl	%esi, %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5037:
+	.size	stbi__shiftsigned, .-stbi__shiftsigned
+	.type	stbi__bmp_set_mask_defaults, @function
+stbi__bmp_set_mask_defaults:
+.LFB5038:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movl	%esi, -12(%rbp)
+	cmpl	$3, -12(%rbp)
+	jne	.L1696
+	movl	$1, %eax
+	jmp	.L1697
+.L1696:
+	cmpl	$0, -12(%rbp)
+	jne	.L1698
+	movq	-8(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1699
+	movq	-8(%rbp), %rax
+	movl	$31744, 12(%rax)
+	movq	-8(%rbp), %rax
+	movl	$992, 16(%rax)
+	movq	-8(%rbp), %rax
+	movl	$31, 20(%rax)
+	jmp	.L1700
+.L1699:
+	movq	-8(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$32, %eax
+	jne	.L1701
+	movq	-8(%rbp), %rax
+	movl	$16711680, 12(%rax)
+	movq	-8(%rbp), %rax
+	movl	$65280, 16(%rax)
+	movq	-8(%rbp), %rax
+	movl	$255, 20(%rax)
+	movq	-8(%rbp), %rax
+	movl	$-16777216, 24(%rax)
+	movq	-8(%rbp), %rax
+	movl	$0, 28(%rax)
+	jmp	.L1700
+.L1701:
+	movq	-8(%rbp), %rax
+	movl	$0, 24(%rax)
+	movq	-8(%rbp), %rax
+	movl	24(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 20(%rax)
+	movq	-8(%rbp), %rax
+	movl	20(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 16(%rax)
+	movq	-8(%rbp), %rax
+	movl	16(%rax), %edx
+	movq	-8(%rbp), %rax
+	movl	%edx, 12(%rax)
+.L1700:
+	movl	$1, %eax
+	jmp	.L1697
+.L1698:
+	movl	$0, %eax
+.L1697:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5038:
+	.size	stbi__bmp_set_mask_defaults, .-stbi__bmp_set_mask_defaults
+	.type	stbi__bmp_parse_header, @function
+stbi__bmp_parse_header:
+.LFB5039:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$66, %al
+	jne	.L1703
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$77, %al
+	je	.L1704
+.L1703:
+	movl	$0, %eax
+	jmp	.L1705
+.L1704:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 4(%rax)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movl	%eax, -8(%rbp)
+	movq	-32(%rbp), %rax
+	movl	-8(%rbp), %edx
+	movl	%edx, 8(%rax)
+	movq	-32(%rbp), %rax
+	movl	$0, 24(%rax)
+	movq	-32(%rbp), %rax
+	movl	24(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 20(%rax)
+	movq	-32(%rbp), %rax
+	movl	20(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 16(%rax)
+	movq	-32(%rbp), %rax
+	movl	16(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 12(%rax)
+	movq	-32(%rbp), %rax
+	movl	$14, 32(%rax)
+	movq	-32(%rbp), %rax
+	movl	4(%rax), %eax
+	testl	%eax, %eax
+	jns	.L1706
+	movl	$0, %eax
+	jmp	.L1705
+.L1706:
+	cmpl	$12, -8(%rbp)
+	je	.L1707
+	cmpl	$40, -8(%rbp)
+	je	.L1707
+	cmpl	$56, -8(%rbp)
+	je	.L1707
+	cmpl	$108, -8(%rbp)
+	je	.L1707
+	cmpl	$124, -8(%rbp)
+	je	.L1707
+	movl	$0, %eax
+	jmp	.L1705
+.L1707:
+	cmpl	$12, -8(%rbp)
+	jne	.L1708
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	%edx, 4(%rax)
+	jmp	.L1709
+.L1708:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rdx
+	movl	%eax, 4(%rdx)
+.L1709:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	cmpl	$1, %eax
+	je	.L1710
+	movl	$0, %eax
+	jmp	.L1705
+.L1710:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-32(%rbp), %rdx
+	movl	%eax, (%rdx)
+	cmpl	$12, -8(%rbp)
+	je	.L1711
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movl	%eax, -4(%rbp)
+	cmpl	$1, -4(%rbp)
+	je	.L1712
+	cmpl	$2, -4(%rbp)
+	jne	.L1713
+.L1712:
+	movl	$0, %eax
+	jmp	.L1705
+.L1713:
+	cmpl	$3, -4(%rbp)
+	jle	.L1714
+	movl	$0, %eax
+	jmp	.L1705
+.L1714:
+	cmpl	$3, -4(%rbp)
+	jne	.L1715
+	movq	-32(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	je	.L1715
+	movq	-32(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$32, %eax
+	je	.L1715
+	movl	$0, %eax
+	jmp	.L1705
+.L1715:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	cmpl	$40, -8(%rbp)
+	je	.L1716
+	cmpl	$56, -8(%rbp)
+	jne	.L1717
+.L1716:
+	cmpl	$56, -8(%rbp)
+	jne	.L1718
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+.L1718:
+	movq	-32(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	je	.L1719
+	movq	-32(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$32, %eax
+	jne	.L1727
+.L1719:
+	cmpl	$0, -4(%rbp)
+	jne	.L1721
+	movl	-4(%rbp), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__bmp_set_mask_defaults
+	jmp	.L1727
+.L1721:
+	cmpl	$3, -4(%rbp)
+	jne	.L1722
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 12(%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 16(%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 20(%rdx)
+	movq	-32(%rbp), %rax
+	movl	32(%rax), %eax
+	leal	12(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 32(%rax)
+	movq	-32(%rbp), %rax
+	movl	12(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	16(%rax), %eax
+	cmpl	%eax, %edx
+	jne	.L1727
+	movq	-32(%rbp), %rax
+	movl	16(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	20(%rax), %eax
+	cmpl	%eax, %edx
+	jne	.L1727
+	movl	$0, %eax
+	jmp	.L1705
+.L1722:
+	movl	$0, %eax
+	jmp	.L1705
+.L1717:
+	cmpl	$108, -8(%rbp)
+	je	.L1723
+	cmpl	$124, -8(%rbp)
+	je	.L1723
+	movl	$0, %eax
+	jmp	.L1705
+.L1723:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 12(%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 16(%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 20(%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 24(%rdx)
+	cmpl	$3, -4(%rbp)
+	je	.L1724
+	movl	-4(%rbp), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__bmp_set_mask_defaults
+.L1724:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movl	$0, -12(%rbp)
+	jmp	.L1725
+.L1726:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	addl	$1, -12(%rbp)
+.L1725:
+	cmpl	$11, -12(%rbp)
+	jle	.L1726
+	cmpl	$124, -8(%rbp)
+	jne	.L1711
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+	jmp	.L1711
+.L1727:
+	nop
+.L1711:
+	movl	$1, %eax
+.L1705:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5039:
+	.size	stbi__bmp_parse_header, .-stbi__bmp_parse_header
+	.type	stbi__bmp_load, @function
+stbi__bmp_load:
+.LFB5040:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$1320, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -1288(%rbp)
+	movq	%rsi, -1296(%rbp)
+	movq	%rdx, -1304(%rbp)
+	movq	%rcx, -1312(%rbp)
+	movl	%r8d, -1316(%rbp)
+	movq	%r9, -1328(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movl	$0, -1180(%rbp)
+	movl	$0, -1176(%rbp)
+	movl	$0, -1172(%rbp)
+	movl	$0, -1168(%rbp)
+	movl	$0, -1260(%rbp)
+	movl	$255, -1076(%rbp)
+	leaq	-1104(%rbp), %rdx
+	movq	-1288(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__bmp_parse_header
+	testq	%rax, %rax
+	jne	.L1729
+	movl	$0, %eax
+	jmp	.L1816
+.L1729:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	testl	%eax, %eax
+	setg	%al
+	movzbl	%al, %eax
+	movl	%eax, -1164(%rbp)
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	negl	%edx
+	cmovns	%edx, %eax
+	movl	%eax, %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, 4(%rax)
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L1731
+	movl	$0, %eax
+	jmp	.L1816
+.L1731:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L1732
+	movl	$0, %eax
+	jmp	.L1816
+.L1732:
+	movl	-1092(%rbp), %eax
+	movl	%eax, -1180(%rbp)
+	movl	-1088(%rbp), %eax
+	movl	%eax, -1176(%rbp)
+	movl	-1084(%rbp), %eax
+	movl	%eax, -1172(%rbp)
+	movl	-1080(%rbp), %eax
+	movl	%eax, -1168(%rbp)
+	movl	-1076(%rbp), %eax
+	movl	%eax, -1264(%rbp)
+	movl	-1096(%rbp), %eax
+	cmpl	$12, %eax
+	jne	.L1733
+	movl	-1104(%rbp), %eax
+	cmpl	$23, %eax
+	jg	.L1734
+	movl	-1100(%rbp), %edx
+	movl	-1072(%rbp), %eax
+	subl	%eax, %edx
+	leal	-24(%rdx), %eax
+	movslq	%eax, %rdx
+	imulq	$1431655766, %rdx, %rdx
+	movq	%rdx, %rcx
+	shrq	$32, %rcx
+	cltd
+	movl	%ecx, %eax
+	subl	%edx, %eax
+	movl	%eax, -1260(%rbp)
+	jmp	.L1734
+.L1733:
+	movl	-1104(%rbp), %eax
+	cmpl	$15, %eax
+	jg	.L1734
+	movl	-1100(%rbp), %edx
+	movl	-1072(%rbp), %eax
+	subl	%eax, %edx
+	movl	-1096(%rbp), %eax
+	subl	%eax, %edx
+	movl	%edx, %eax
+	sarl	$2, %eax
+	movl	%eax, -1260(%rbp)
+.L1734:
+	cmpl	$0, -1260(%rbp)
+	jne	.L1735
+	movq	-1288(%rbp), %rax
+	movl	184(%rax), %edx
+	movq	-1288(%rbp), %rax
+	movq	192(%rax), %rcx
+	movq	-1288(%rbp), %rax
+	movq	208(%rax), %rax
+	subq	%rax, %rcx
+	movl	%ecx, %eax
+	addl	%edx, %eax
+	movl	%eax, -1160(%rbp)
+	movl	$1024, -1156(%rbp)
+	movl	$1024, -1152(%rbp)
+	cmpl	$0, -1160(%rbp)
+	jle	.L1736
+	movl	-1160(%rbp), %eax
+	cmpl	-1156(%rbp), %eax
+	jle	.L1737
+.L1736:
+	movl	$0, %eax
+	jmp	.L1816
+.L1737:
+	movl	-1100(%rbp), %eax
+	cmpl	%eax, -1160(%rbp)
+	jg	.L1738
+	movl	-1100(%rbp), %eax
+	subl	-1160(%rbp), %eax
+	cmpl	%eax, -1152(%rbp)
+	jge	.L1739
+.L1738:
+	movl	$0, %eax
+	jmp	.L1816
+.L1739:
+	movl	-1100(%rbp), %eax
+	subl	-1160(%rbp), %eax
+	movl	%eax, %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+.L1735:
+	movl	-1104(%rbp), %eax
+	cmpl	$24, %eax
+	jne	.L1740
+	cmpl	$-16777216, -1168(%rbp)
+	jne	.L1740
+	movq	-1288(%rbp), %rax
+	movl	$3, 8(%rax)
+	jmp	.L1741
+.L1740:
+	cmpl	$0, -1168(%rbp)
+	je	.L1742
+	movl	$4, %edx
+	jmp	.L1743
+.L1742:
+	movl	$3, %edx
+.L1743:
+	movq	-1288(%rbp), %rax
+	movl	%edx, 8(%rax)
+.L1741:
+	cmpl	$0, -1316(%rbp)
+	je	.L1744
+	cmpl	$2, -1316(%rbp)
+	jle	.L1744
+	movl	-1316(%rbp), %eax
+	movl	%eax, -1244(%rbp)
+	jmp	.L1745
+.L1744:
+	movq	-1288(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	%eax, -1244(%rbp)
+.L1745:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %esi
+	movl	-1244(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L1746
+	movl	$0, %eax
+	jmp	.L1816
+.L1746:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %esi
+	movl	-1244(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -1128(%rbp)
+	cmpq	$0, -1128(%rbp)
+	jne	.L1747
+	movl	$0, %eax
+	jmp	.L1816
+.L1747:
+	movl	-1104(%rbp), %eax
+	cmpl	$15, %eax
+	jg	.L1748
+	movl	$0, -1240(%rbp)
+	cmpl	$0, -1260(%rbp)
+	je	.L1749
+	cmpl	$256, -1260(%rbp)
+	jle	.L1750
+.L1749:
+	movq	-1128(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1816
+.L1750:
+	movl	$0, -1256(%rbp)
+	jmp	.L1751
+.L1753:
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-1256(%rbp), %edx
+	movslq	%edx, %rdx
+	movb	%al, -1054(%rbp,%rdx,4)
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-1256(%rbp), %edx
+	movslq	%edx, %rdx
+	movb	%al, -1055(%rbp,%rdx,4)
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-1256(%rbp), %edx
+	movslq	%edx, %rdx
+	movb	%al, -1056(%rbp,%rdx,4)
+	movl	-1096(%rbp), %eax
+	cmpl	$12, %eax
+	je	.L1752
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+.L1752:
+	movl	-1256(%rbp), %eax
+	cltq
+	movb	$-1, -1053(%rbp,%rax,4)
+	addl	$1, -1256(%rbp)
+.L1751:
+	movl	-1256(%rbp), %eax
+	cmpl	-1260(%rbp), %eax
+	jl	.L1753
+	movl	-1100(%rbp), %edx
+	movl	-1072(%rbp), %eax
+	subl	%eax, %edx
+	movl	-1096(%rbp), %eax
+	subl	%eax, %edx
+	movl	-1096(%rbp), %eax
+	cmpl	$12, %eax
+	jne	.L1754
+	movl	$3, %eax
+	jmp	.L1755
+.L1754:
+	movl	$4, %eax
+.L1755:
+	imull	-1260(%rbp), %eax
+	subl	%eax, %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	-1104(%rbp), %eax
+	cmpl	$1, %eax
+	jne	.L1756
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	addl	$7, %eax
+	shrl	$3, %eax
+	movl	%eax, -1248(%rbp)
+	jmp	.L1757
+.L1756:
+	movl	-1104(%rbp), %eax
+	cmpl	$4, %eax
+	jne	.L1758
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	addl	$1, %eax
+	shrl	%eax
+	movl	%eax, -1248(%rbp)
+	jmp	.L1757
+.L1758:
+	movl	-1104(%rbp), %eax
+	cmpl	$8, %eax
+	jne	.L1759
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, -1248(%rbp)
+	jmp	.L1757
+.L1759:
+	movq	-1128(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1816
+.L1757:
+	movl	-1248(%rbp), %eax
+	negl	%eax
+	andl	$3, %eax
+	movl	%eax, -1148(%rbp)
+	movl	-1104(%rbp), %eax
+	cmpl	$1, %eax
+	jne	.L1760
+	movl	$0, -1252(%rbp)
+	jmp	.L1761
+.L1768:
+	movl	$7, -1236(%rbp)
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1232(%rbp)
+	movl	$0, -1256(%rbp)
+	jmp	.L1762
+.L1767:
+	movl	-1236(%rbp), %eax
+	movl	-1232(%rbp), %edx
+	movl	%eax, %ecx
+	sarl	%cl, %edx
+	movl	%edx, %eax
+	andl	$1, %eax
+	movl	%eax, -1132(%rbp)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1132(%rbp), %eax
+	cltq
+	movzbl	-1056(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1132(%rbp), %eax
+	cltq
+	movzbl	-1055(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1132(%rbp), %eax
+	cltq
+	movzbl	-1054(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	cmpl	$4, -1244(%rbp)
+	jne	.L1763
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$-1, (%rax)
+.L1763:
+	movl	-1256(%rbp), %eax
+	leal	1(%rax), %edx
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, %edx
+	je	.L1818
+	subl	$1, -1236(%rbp)
+	cmpl	$0, -1236(%rbp)
+	jns	.L1766
+	movl	$7, -1236(%rbp)
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1232(%rbp)
+.L1766:
+	addl	$1, -1256(%rbp)
+.L1762:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -1256(%rbp)
+	jl	.L1767
+	jmp	.L1765
+.L1818:
+	nop
+.L1765:
+	movl	-1148(%rbp), %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	addl	$1, -1252(%rbp)
+.L1761:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, -1252(%rbp)
+	jl	.L1768
+	jmp	.L1769
+.L1760:
+	movl	$0, -1252(%rbp)
+	jmp	.L1770
+.L1780:
+	movl	$0, -1256(%rbp)
+	jmp	.L1771
+.L1779:
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1228(%rbp)
+	movl	$0, -1224(%rbp)
+	movl	-1104(%rbp), %eax
+	cmpl	$4, %eax
+	jne	.L1772
+	movl	-1228(%rbp), %eax
+	andl	$15, %eax
+	movl	%eax, -1224(%rbp)
+	sarl	$4, -1228(%rbp)
+.L1772:
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1228(%rbp), %eax
+	cltq
+	movzbl	-1056(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1228(%rbp), %eax
+	cltq
+	movzbl	-1055(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1228(%rbp), %eax
+	cltq
+	movzbl	-1054(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	cmpl	$4, -1244(%rbp)
+	jne	.L1773
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$-1, (%rax)
+.L1773:
+	movl	-1256(%rbp), %eax
+	leal	1(%rax), %edx
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, %edx
+	je	.L1819
+	movl	-1104(%rbp), %eax
+	cmpl	$8, %eax
+	jne	.L1776
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	jmp	.L1777
+.L1776:
+	movl	-1224(%rbp), %eax
+.L1777:
+	movl	%eax, -1228(%rbp)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1228(%rbp), %eax
+	cltq
+	movzbl	-1056(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1228(%rbp), %eax
+	cltq
+	movzbl	-1055(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-1228(%rbp), %eax
+	cltq
+	movzbl	-1054(%rbp,%rax,4), %eax
+	movb	%al, (%rdx)
+	cmpl	$4, -1244(%rbp)
+	jne	.L1778
+	movl	-1240(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1240(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$-1, (%rax)
+.L1778:
+	addl	$2, -1256(%rbp)
+.L1771:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -1256(%rbp)
+	jl	.L1779
+	jmp	.L1775
+.L1819:
+	nop
+.L1775:
+	movl	-1148(%rbp), %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	addl	$1, -1252(%rbp)
+.L1770:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, -1252(%rbp)
+	jl	.L1780
+	jmp	.L1769
+.L1748:
+	movl	$0, -1220(%rbp)
+	movl	$0, -1216(%rbp)
+	movl	$0, -1212(%rbp)
+	movl	$0, -1208(%rbp)
+	movl	$0, -1204(%rbp)
+	movl	$0, -1200(%rbp)
+	movl	$0, -1196(%rbp)
+	movl	$0, -1192(%rbp)
+	movl	$0, -1188(%rbp)
+	movl	$0, -1184(%rbp)
+	movl	-1100(%rbp), %edx
+	movl	-1072(%rbp), %eax
+	subl	%eax, %edx
+	movl	-1096(%rbp), %eax
+	subl	%eax, %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	-1104(%rbp), %eax
+	cmpl	$24, %eax
+	jne	.L1781
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %edx
+	movl	%edx, %eax
+	addl	%eax, %eax
+	addl	%edx, %eax
+	movl	%eax, -1248(%rbp)
+	jmp	.L1782
+.L1781:
+	movl	-1104(%rbp), %eax
+	cmpl	$16, %eax
+	jne	.L1783
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	addl	%eax, %eax
+	movl	%eax, -1248(%rbp)
+	jmp	.L1782
+.L1783:
+	movl	$0, -1248(%rbp)
+.L1782:
+	movl	-1248(%rbp), %eax
+	negl	%eax
+	andl	$3, %eax
+	movl	%eax, -1148(%rbp)
+	movl	-1104(%rbp), %eax
+	cmpl	$24, %eax
+	jne	.L1784
+	movl	$1, -1184(%rbp)
+	jmp	.L1785
+.L1784:
+	movl	-1104(%rbp), %eax
+	cmpl	$32, %eax
+	jne	.L1785
+	cmpl	$255, -1172(%rbp)
+	jne	.L1785
+	cmpl	$65280, -1176(%rbp)
+	jne	.L1785
+	cmpl	$16711680, -1180(%rbp)
+	jne	.L1785
+	cmpl	$-16777216, -1168(%rbp)
+	jne	.L1785
+	movl	$2, -1184(%rbp)
+.L1785:
+	cmpl	$0, -1184(%rbp)
+	jne	.L1786
+	cmpl	$0, -1180(%rbp)
+	je	.L1787
+	cmpl	$0, -1176(%rbp)
+	je	.L1787
+	cmpl	$0, -1172(%rbp)
+	jne	.L1788
+.L1787:
+	movq	-1128(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1816
+.L1788:
+	movl	-1180(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__high_bit
+	subl	$7, %eax
+	movl	%eax, -1220(%rbp)
+	movl	-1180(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__bitcount
+	movl	%eax, -1204(%rbp)
+	movl	-1176(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__high_bit
+	subl	$7, %eax
+	movl	%eax, -1216(%rbp)
+	movl	-1176(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__bitcount
+	movl	%eax, -1200(%rbp)
+	movl	-1172(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__high_bit
+	subl	$7, %eax
+	movl	%eax, -1212(%rbp)
+	movl	-1172(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__bitcount
+	movl	%eax, -1196(%rbp)
+	movl	-1168(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__high_bit
+	subl	$7, %eax
+	movl	%eax, -1208(%rbp)
+	movl	-1168(%rbp), %eax
+	movl	%eax, %edi
+	call	stbi__bitcount
+	movl	%eax, -1192(%rbp)
+	cmpl	$8, -1204(%rbp)
+	jg	.L1789
+	cmpl	$8, -1200(%rbp)
+	jg	.L1789
+	cmpl	$8, -1196(%rbp)
+	jg	.L1789
+	cmpl	$8, -1192(%rbp)
+	jle	.L1786
+.L1789:
+	movq	-1128(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1816
+.L1786:
+	movl	$0, -1252(%rbp)
+	jmp	.L1790
+.L1805:
+	cmpl	$0, -1184(%rbp)
+	je	.L1791
+	movl	$0, -1256(%rbp)
+	jmp	.L1792
+.L1796:
+	movl	-1188(%rbp), %eax
+	cltq
+	leaq	2(%rax), %rdx
+	movq	-1128(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+	movl	-1188(%rbp), %eax
+	cltq
+	leaq	1(%rax), %rdx
+	movq	-1128(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+	movl	-1188(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+	addl	$3, -1188(%rbp)
+	cmpl	$2, -1184(%rbp)
+	jne	.L1793
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	jmp	.L1794
+.L1793:
+	movl	$-1, %eax
+.L1794:
+	movb	%al, -1266(%rbp)
+	movzbl	-1266(%rbp), %eax
+	orl	%eax, -1264(%rbp)
+	cmpl	$4, -1244(%rbp)
+	jne	.L1795
+	movl	-1188(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1188(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	-1266(%rbp), %eax
+	movb	%al, (%rdx)
+.L1795:
+	addl	$1, -1256(%rbp)
+.L1792:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -1256(%rbp)
+	jl	.L1796
+	jmp	.L1797
+.L1791:
+	movl	-1104(%rbp), %eax
+	movl	%eax, -1144(%rbp)
+	movl	$0, -1256(%rbp)
+	jmp	.L1798
+.L1804:
+	cmpl	$16, -1144(%rbp)
+	jne	.L1799
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	jmp	.L1800
+.L1799:
+	movq	-1288(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32le
+.L1800:
+	movl	%eax, -1140(%rbp)
+	movl	-1140(%rbp), %eax
+	andl	-1180(%rbp), %eax
+	movl	-1204(%rbp), %edx
+	movl	-1220(%rbp), %ecx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__shiftsigned
+	movl	%eax, %ecx
+	movl	-1188(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1188(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movl	-1140(%rbp), %eax
+	andl	-1176(%rbp), %eax
+	movl	-1200(%rbp), %edx
+	movl	-1216(%rbp), %ecx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__shiftsigned
+	movl	%eax, %ecx
+	movl	-1188(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1188(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	movl	-1140(%rbp), %eax
+	andl	-1172(%rbp), %eax
+	movl	-1196(%rbp), %edx
+	movl	-1212(%rbp), %ecx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__shiftsigned
+	movl	%eax, %ecx
+	movl	-1188(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1188(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movl	%ecx, %edx
+	movb	%dl, (%rax)
+	cmpl	$0, -1168(%rbp)
+	je	.L1801
+	movl	-1140(%rbp), %eax
+	andl	-1168(%rbp), %eax
+	movl	-1192(%rbp), %edx
+	movl	-1208(%rbp), %ecx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__shiftsigned
+	jmp	.L1802
+.L1801:
+	movl	$255, %eax
+.L1802:
+	movl	%eax, -1136(%rbp)
+	movl	-1136(%rbp), %eax
+	orl	%eax, -1264(%rbp)
+	cmpl	$4, -1244(%rbp)
+	jne	.L1803
+	movl	-1188(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1188(%rbp)
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-1136(%rbp), %edx
+	movb	%dl, (%rax)
+.L1803:
+	addl	$1, -1256(%rbp)
+.L1798:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, -1256(%rbp)
+	jl	.L1804
+.L1797:
+	movl	-1148(%rbp), %edx
+	movq	-1288(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	addl	$1, -1252(%rbp)
+.L1790:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, -1252(%rbp)
+	jl	.L1805
+.L1769:
+	cmpl	$4, -1244(%rbp)
+	jne	.L1806
+	cmpl	$0, -1264(%rbp)
+	jne	.L1806
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	sall	$2, %eax
+	subl	$1, %eax
+	movl	%eax, -1256(%rbp)
+	jmp	.L1807
+.L1808:
+	movl	-1256(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$-1, (%rax)
+	subl	$4, -1256(%rbp)
+.L1807:
+	cmpl	$0, -1256(%rbp)
+	jns	.L1808
+.L1806:
+	cmpl	$0, -1164(%rbp)
+	je	.L1809
+	movl	$0, -1252(%rbp)
+	jmp	.L1810
+.L1813:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %edx
+	movl	-1252(%rbp), %eax
+	imull	%eax, %edx
+	movl	-1244(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -1120(%rbp)
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %edx
+	movl	-1252(%rbp), %eax
+	subl	%eax, %edx
+	subl	$1, %edx
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	imull	%eax, %edx
+	movl	-1244(%rbp), %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-1128(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -1112(%rbp)
+	movl	$0, -1256(%rbp)
+	jmp	.L1811
+.L1812:
+	movl	-1256(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1120(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -1265(%rbp)
+	movl	-1256(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1112(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-1256(%rbp), %edx
+	movslq	%edx, %rcx
+	movq	-1120(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-1256(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-1112(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	-1265(%rbp), %eax
+	movb	%al, (%rdx)
+	addl	$1, -1256(%rbp)
+.L1811:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	imull	-1244(%rbp), %eax
+	cmpl	%eax, -1256(%rbp)
+	jl	.L1812
+	addl	$1, -1252(%rbp)
+.L1810:
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	sarl	%eax
+	cmpl	%eax, -1252(%rbp)
+	jl	.L1813
+.L1809:
+	cmpl	$0, -1316(%rbp)
+	je	.L1814
+	movl	-1316(%rbp), %eax
+	cmpl	-1244(%rbp), %eax
+	je	.L1814
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %edi
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %ecx
+	movl	-1316(%rbp), %edx
+	movl	-1244(%rbp), %esi
+	movq	-1128(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -1128(%rbp)
+	cmpq	$0, -1128(%rbp)
+	jne	.L1814
+	movq	-1128(%rbp), %rax
+	jmp	.L1816
+.L1814:
+	movq	-1288(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-1296(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-1288(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-1304(%rbp), %rax
+	movl	%edx, (%rax)
+	cmpq	$0, -1312(%rbp)
+	je	.L1815
+	movq	-1288(%rbp), %rax
+	movl	8(%rax), %edx
+	movq	-1312(%rbp), %rax
+	movl	%edx, (%rax)
+.L1815:
+	movq	-1128(%rbp), %rax
+.L1816:
+	movq	-24(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1817
+	call	__stack_chk_fail@PLT
+.L1817:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5040:
+	.size	stbi__bmp_load, .-stbi__bmp_load
+	.type	stbi__tga_get_comp, @function
+stbi__tga_get_comp:
+.LFB5041:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -4(%rbp)
+	movl	%esi, -8(%rbp)
+	movq	%rdx, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	je	.L1821
+	movq	-16(%rbp), %rax
+	movl	$0, (%rax)
+.L1821:
+	movl	-4(%rbp), %eax
+	subl	$8, %eax
+	cmpl	$24, %eax
+	ja	.L1822
+	movl	%eax, %eax
+	leaq	0(,%rax,4), %rdx
+	leaq	.L1824(%rip), %rax
+	movl	(%rdx,%rax), %eax
+	cltq
+	leaq	.L1824(%rip), %rdx
+	addq	%rdx, %rax
+	jmp	*%rax
+	.section	.rodata
+	.align 4
+	.align 4
+.L1824:
+	.long	.L1827-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1826-.L1824
+	.long	.L1825-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1823-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1822-.L1824
+	.long	.L1823-.L1824
+	.text
+.L1827:
+	movl	$1, %eax
+	jmp	.L1828
+.L1825:
+	cmpl	$0, -8(%rbp)
+	je	.L1826
+	movl	$2, %eax
+	jmp	.L1828
+.L1826:
+	cmpq	$0, -16(%rbp)
+	je	.L1829
+	movq	-16(%rbp), %rax
+	movl	$1, (%rax)
+.L1829:
+	movl	$3, %eax
+	jmp	.L1828
+.L1823:
+	movl	-4(%rbp), %eax
+	leal	7(%rax), %edx
+	testl	%eax, %eax
+	cmovs	%edx, %eax
+	sarl	$3, %eax
+	jmp	.L1828
+.L1822:
+	movl	$0, %eax
+.L1828:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5041:
+	.size	stbi__tga_get_comp, .-stbi__tga_get_comp
+	.type	stbi__tga_info, @function
+stbi__tga_info:
+.LFB5042:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movq	%rdx, -56(%rbp)
+	movq	%rcx, -64(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -24(%rbp)
+	cmpl	$1, -24(%rbp)
+	jle	.L1831
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1831:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -20(%rbp)
+	cmpl	$1, -24(%rbp)
+	jne	.L1833
+	cmpl	$1, -20(%rbp)
+	je	.L1834
+	cmpl	$9, -20(%rbp)
+	je	.L1834
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1834:
+	movq	-40(%rbp), %rax
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -16(%rbp)
+	cmpl	$8, -16(%rbp)
+	je	.L1835
+	cmpl	$15, -16(%rbp)
+	je	.L1835
+	cmpl	$16, -16(%rbp)
+	je	.L1835
+	cmpl	$24, -16(%rbp)
+	je	.L1835
+	cmpl	$32, -16(%rbp)
+	je	.L1835
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1835:
+	movq	-40(%rbp), %rax
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	-16(%rbp), %eax
+	movl	%eax, -28(%rbp)
+	jmp	.L1836
+.L1833:
+	cmpl	$2, -20(%rbp)
+	je	.L1837
+	cmpl	$3, -20(%rbp)
+	je	.L1837
+	cmpl	$10, -20(%rbp)
+	je	.L1837
+	cmpl	$11, -20(%rbp)
+	je	.L1837
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1837:
+	movq	-40(%rbp), %rax
+	movl	$9, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	$0, -28(%rbp)
+.L1836:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -12(%rbp)
+	cmpl	$0, -12(%rbp)
+	jg	.L1838
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1838:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -8(%rbp)
+	cmpl	$0, -8(%rbp)
+	jg	.L1839
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1839:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpl	$0, -28(%rbp)
+	je	.L1840
+	cmpl	$8, -4(%rbp)
+	je	.L1841
+	cmpl	$16, -4(%rbp)
+	je	.L1841
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1841:
+	movl	-28(%rbp), %eax
+	movl	$0, %edx
+	movl	$0, %esi
+	movl	%eax, %edi
+	call	stbi__tga_get_comp
+	movl	%eax, -32(%rbp)
+	jmp	.L1842
+.L1840:
+	cmpl	$3, -20(%rbp)
+	je	.L1843
+	cmpl	$11, -20(%rbp)
+	jne	.L1844
+.L1843:
+	movl	$1, %ecx
+	jmp	.L1845
+.L1844:
+	movl	$0, %ecx
+.L1845:
+	movl	-4(%rbp), %eax
+	movl	$0, %edx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__tga_get_comp
+	movl	%eax, -32(%rbp)
+.L1842:
+	cmpl	$0, -32(%rbp)
+	jne	.L1846
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L1832
+.L1846:
+	cmpq	$0, -48(%rbp)
+	je	.L1847
+	movq	-48(%rbp), %rax
+	movl	-12(%rbp), %edx
+	movl	%edx, (%rax)
+.L1847:
+	cmpq	$0, -56(%rbp)
+	je	.L1848
+	movq	-56(%rbp), %rax
+	movl	-8(%rbp), %edx
+	movl	%edx, (%rax)
+.L1848:
+	cmpq	$0, -64(%rbp)
+	je	.L1849
+	movq	-64(%rbp), %rax
+	movl	-32(%rbp), %edx
+	movl	%edx, (%rax)
+.L1849:
+	movl	$1, %eax
+.L1832:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5042:
+	.size	stbi__tga_info, .-stbi__tga_info
+	.type	stbi__tga_test, @function
+stbi__tga_test:
+.LFB5043:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movl	$0, -12(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -8(%rbp)
+	cmpl	$1, -8(%rbp)
+	jg	.L1863
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$1, -8(%rbp)
+	jne	.L1853
+	cmpl	$1, -4(%rbp)
+	je	.L1854
+	cmpl	$9, -4(%rbp)
+	jne	.L1864
+.L1854:
+	movq	-24(%rbp), %rax
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$8, -4(%rbp)
+	je	.L1855
+	cmpl	$15, -4(%rbp)
+	je	.L1855
+	cmpl	$16, -4(%rbp)
+	je	.L1855
+	cmpl	$24, -4(%rbp)
+	je	.L1855
+	cmpl	$32, -4(%rbp)
+	jne	.L1865
+.L1855:
+	movq	-24(%rbp), %rax
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	jmp	.L1856
+.L1853:
+	cmpl	$2, -4(%rbp)
+	je	.L1857
+	cmpl	$3, -4(%rbp)
+	je	.L1857
+	cmpl	$10, -4(%rbp)
+	je	.L1857
+	cmpl	$11, -4(%rbp)
+	jne	.L1866
+.L1857:
+	movq	-24(%rbp), %rax
+	movl	$9, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+.L1856:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	testl	%eax, %eax
+	jle	.L1867
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	testl	%eax, %eax
+	jle	.L1868
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$1, -8(%rbp)
+	jne	.L1860
+	cmpl	$8, -4(%rbp)
+	je	.L1860
+	cmpl	$16, -4(%rbp)
+	jne	.L1869
+.L1860:
+	cmpl	$8, -4(%rbp)
+	je	.L1861
+	cmpl	$15, -4(%rbp)
+	je	.L1861
+	cmpl	$16, -4(%rbp)
+	je	.L1861
+	cmpl	$24, -4(%rbp)
+	je	.L1861
+	cmpl	$32, -4(%rbp)
+	jne	.L1870
+.L1861:
+	movl	$1, -12(%rbp)
+	jmp	.L1852
+.L1863:
+	nop
+	jmp	.L1852
+.L1864:
+	nop
+	jmp	.L1852
+.L1865:
+	nop
+	jmp	.L1852
+.L1866:
+	nop
+	jmp	.L1852
+.L1867:
+	nop
+	jmp	.L1852
+.L1868:
+	nop
+	jmp	.L1852
+.L1869:
+	nop
+	jmp	.L1852
+.L1870:
+	nop
+.L1852:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	-12(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5043:
+	.size	stbi__tga_test, .-stbi__tga_test
+	.type	stbi__tga_read_rgb16, @function
+stbi__tga_read_rgb16:
+.LFB5044:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movw	%ax, -16(%rbp)
+	movw	$31, -14(%rbp)
+	movzwl	-16(%rbp), %eax
+	shrw	$10, %ax
+	andw	-14(%rbp), %ax
+	movzwl	%ax, %eax
+	movl	%eax, -12(%rbp)
+	movzwl	-16(%rbp), %eax
+	shrw	$5, %ax
+	andw	-14(%rbp), %ax
+	movzwl	%ax, %eax
+	movl	%eax, -8(%rbp)
+	movzwl	-16(%rbp), %eax
+	andw	-14(%rbp), %ax
+	movzwl	%ax, %eax
+	movl	%eax, -4(%rbp)
+	movl	-12(%rbp), %edx
+	movl	%edx, %eax
+	sall	$8, %eax
+	subl	%edx, %eax
+	movslq	%eax, %rdx
+	imulq	$-2078209981, %rdx, %rdx
+	shrq	$32, %rdx
+	addl	%eax, %edx
+	sarl	$4, %edx
+	sarl	$31, %eax
+	subl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movb	%dl, (%rax)
+	movl	-8(%rbp), %edx
+	movl	%edx, %eax
+	sall	$8, %eax
+	subl	%edx, %eax
+	movslq	%eax, %rdx
+	imulq	$-2078209981, %rdx, %rdx
+	shrq	$32, %rdx
+	addl	%eax, %edx
+	sarl	$4, %edx
+	sarl	$31, %eax
+	subl	%eax, %edx
+	movq	-32(%rbp), %rax
+	addq	$1, %rax
+	movb	%dl, (%rax)
+	movl	-4(%rbp), %edx
+	movl	%edx, %eax
+	sall	$8, %eax
+	subl	%edx, %eax
+	movslq	%eax, %rdx
+	imulq	$-2078209981, %rdx, %rdx
+	shrq	$32, %rdx
+	addl	%eax, %edx
+	sarl	$4, %edx
+	sarl	$31, %eax
+	subl	%eax, %edx
+	movq	-32(%rbp), %rax
+	addq	$2, %rax
+	movb	%dl, (%rax)
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5044:
+	.size	stbi__tga_read_rgb16, .-stbi__tga_read_rgb16
+	.section	.rodata
+.LC82:
+	.string	"tga_comp == STBI_rgb"
+	.text
+	.type	stbi__tga_load, @function
+stbi__tga_load:
+.LFB5045:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$208, %rsp
+	movq	%rdi, -168(%rbp)
+	movq	%rsi, -176(%rbp)
+	movq	%rdx, -184(%rbp)
+	movq	%rcx, -192(%rbp)
+	movl	%r8d, -196(%rbp)
+	movq	%r9, -208(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -108(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -104(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -152(%rbp)
+	movl	$0, -148(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -100(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -96(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -92(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -88(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -84(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -80(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -76(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -72(%rbp)
+	movl	$0, -156(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -68(%rbp)
+	movq	$0, -48(%rbp)
+	movl	$0, -12(%rbp)
+	movl	$0, -132(%rbp)
+	movl	$0, -128(%rbp)
+	movl	$1, -124(%rbp)
+	cmpl	$16777216, -76(%rbp)
+	jle	.L1873
+	movl	$0, %eax
+	jmp	.L1924
+.L1873:
+	cmpl	$16777216, -80(%rbp)
+	jle	.L1875
+	movl	$0, %eax
+	jmp	.L1924
+.L1875:
+	cmpl	$7, -152(%rbp)
+	jle	.L1876
+	subl	$8, -152(%rbp)
+	movl	$1, -148(%rbp)
+.L1876:
+	movl	-68(%rbp), %eax
+	andl	$32, %eax
+	testl	%eax, %eax
+	sete	%al
+	movzbl	%al, %eax
+	movl	%eax, -68(%rbp)
+	cmpl	$0, -104(%rbp)
+	je	.L1877
+	leaq	-156(%rbp), %rdx
+	movl	-92(%rbp), %eax
+	movl	$0, %esi
+	movl	%eax, %edi
+	call	stbi__tga_get_comp
+	movl	%eax, -144(%rbp)
+	jmp	.L1878
+.L1877:
+	cmpl	$3, -152(%rbp)
+	sete	%al
+	movzbl	%al, %ecx
+	leaq	-156(%rbp), %rdx
+	movl	-72(%rbp), %eax
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__tga_get_comp
+	movl	%eax, -144(%rbp)
+.L1878:
+	cmpl	$0, -144(%rbp)
+	jne	.L1879
+	movl	$0, %eax
+	jmp	.L1924
+.L1879:
+	movq	-176(%rbp), %rax
+	movl	-80(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-184(%rbp), %rax
+	movl	-76(%rbp), %edx
+	movl	%edx, (%rax)
+	cmpq	$0, -192(%rbp)
+	je	.L1880
+	movq	-192(%rbp), %rax
+	movl	-144(%rbp), %edx
+	movl	%edx, (%rax)
+.L1880:
+	movl	-144(%rbp), %edx
+	movl	-76(%rbp), %esi
+	movl	-80(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L1881
+	movl	$0, %eax
+	jmp	.L1924
+.L1881:
+	movl	-144(%rbp), %edx
+	movl	-76(%rbp), %esi
+	movl	-80(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -56(%rbp)
+	cmpq	$0, -56(%rbp)
+	jne	.L1882
+	movl	$0, %eax
+	jmp	.L1924
+.L1882:
+	movl	-108(%rbp), %edx
+	movq	-168(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	cmpl	$0, -104(%rbp)
+	jne	.L1883
+	cmpl	$0, -148(%rbp)
+	jne	.L1883
+	movl	-156(%rbp), %eax
+	testl	%eax, %eax
+	jne	.L1883
+	movl	$0, -140(%rbp)
+	jmp	.L1884
+.L1887:
+	cmpl	$0, -68(%rbp)
+	je	.L1885
+	movl	-76(%rbp), %eax
+	subl	-140(%rbp), %eax
+	subl	$1, %eax
+	jmp	.L1886
+.L1885:
+	movl	-140(%rbp), %eax
+.L1886:
+	movl	%eax, -64(%rbp)
+	movl	-64(%rbp), %eax
+	imull	-80(%rbp), %eax
+	imull	-144(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -24(%rbp)
+	movl	-80(%rbp), %eax
+	imull	-144(%rbp), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rcx
+	movq	-168(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__getn
+	addl	$1, -140(%rbp)
+.L1884:
+	movl	-140(%rbp), %eax
+	cmpl	-76(%rbp), %eax
+	jl	.L1887
+	jmp	.L1888
+.L1883:
+	cmpl	$0, -104(%rbp)
+	je	.L1889
+	cmpl	$0, -96(%rbp)
+	jne	.L1890
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1924
+.L1890:
+	movl	-100(%rbp), %edx
+	movq	-168(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	-144(%rbp), %ecx
+	movl	-96(%rbp), %eax
+	movl	$0, %edx
+	movl	%ecx, %esi
+	movl	%eax, %edi
+	call	stbi__malloc_mad2
+	movq	%rax, -48(%rbp)
+	cmpq	$0, -48(%rbp)
+	jne	.L1891
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1924
+.L1891:
+	movl	-156(%rbp), %eax
+	testl	%eax, %eax
+	je	.L1892
+	movq	-48(%rbp), %rax
+	movq	%rax, -40(%rbp)
+	cmpl	$3, -144(%rbp)
+	je	.L1893
+	leaq	__PRETTY_FUNCTION__.0(%rip), %rax
+	movq	%rax, %rcx
+	movl	$5958, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC82(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1893:
+	movl	$0, -140(%rbp)
+	jmp	.L1894
+.L1895:
+	movq	-40(%rbp), %rdx
+	movq	-168(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__tga_read_rgb16
+	movl	-144(%rbp), %eax
+	cltq
+	addq	%rax, -40(%rbp)
+	addl	$1, -140(%rbp)
+.L1894:
+	movl	-140(%rbp), %eax
+	cmpl	-96(%rbp), %eax
+	jl	.L1895
+	jmp	.L1889
+.L1892:
+	movl	-96(%rbp), %eax
+	imull	-144(%rbp), %eax
+	movl	%eax, %edx
+	movq	-48(%rbp), %rcx
+	movq	-168(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__getn
+	testl	%eax, %eax
+	jne	.L1889
+	movq	-56(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-48(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1924
+.L1889:
+	movl	$0, -140(%rbp)
+	jmp	.L1896
+.L1914:
+	cmpl	$0, -148(%rbp)
+	je	.L1897
+	cmpl	$0, -132(%rbp)
+	jne	.L1898
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -60(%rbp)
+	movl	-60(%rbp), %eax
+	andl	$127, %eax
+	addl	$1, %eax
+	movl	%eax, -132(%rbp)
+	movl	-60(%rbp), %eax
+	sarl	$7, %eax
+	movl	%eax, -128(%rbp)
+	movl	$1, -124(%rbp)
+	jmp	.L1899
+.L1898:
+	cmpl	$0, -128(%rbp)
+	jne	.L1899
+	movl	$1, -124(%rbp)
+	jmp	.L1899
+.L1897:
+	movl	$1, -124(%rbp)
+.L1899:
+	cmpl	$0, -124(%rbp)
+	je	.L1900
+	cmpl	$0, -104(%rbp)
+	je	.L1901
+	cmpl	$8, -72(%rbp)
+	jne	.L1902
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	jmp	.L1903
+.L1902:
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+.L1903:
+	movl	%eax, -120(%rbp)
+	movl	-120(%rbp), %eax
+	cmpl	-96(%rbp), %eax
+	jl	.L1904
+	movl	$0, -120(%rbp)
+.L1904:
+	movl	-120(%rbp), %eax
+	imull	-144(%rbp), %eax
+	movl	%eax, -120(%rbp)
+	movl	$0, -136(%rbp)
+	jmp	.L1905
+.L1906:
+	movl	-120(%rbp), %edx
+	movl	-136(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-48(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %edx
+	movl	-136(%rbp), %eax
+	cltq
+	movb	%dl, -12(%rbp,%rax)
+	addl	$1, -136(%rbp)
+.L1905:
+	movl	-136(%rbp), %eax
+	cmpl	-144(%rbp), %eax
+	jl	.L1906
+	jmp	.L1907
+.L1901:
+	movl	-156(%rbp), %eax
+	testl	%eax, %eax
+	je	.L1908
+	cmpl	$3, -144(%rbp)
+	je	.L1909
+	leaq	__PRETTY_FUNCTION__.0(%rip), %rax
+	movq	%rax, %rcx
+	movl	$6007, %edx
+	leaq	.LC0(%rip), %rax
+	movq	%rax, %rsi
+	leaq	.LC82(%rip), %rax
+	movq	%rax, %rdi
+	call	__assert_fail@PLT
+.L1909:
+	leaq	-12(%rbp), %rdx
+	movq	-168(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__tga_read_rgb16
+	jmp	.L1907
+.L1908:
+	movl	$0, -136(%rbp)
+	jmp	.L1910
+.L1911:
+	movq	-168(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-136(%rbp), %edx
+	movslq	%edx, %rdx
+	movb	%al, -12(%rbp,%rdx)
+	addl	$1, -136(%rbp)
+.L1910:
+	movl	-136(%rbp), %eax
+	cmpl	-144(%rbp), %eax
+	jl	.L1911
+.L1907:
+	movl	$0, -124(%rbp)
+.L1900:
+	movl	$0, -136(%rbp)
+	jmp	.L1912
+.L1913:
+	movl	-140(%rbp), %eax
+	imull	-144(%rbp), %eax
+	movl	%eax, %edx
+	movl	-136(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	addq	%rax, %rdx
+	movl	-136(%rbp), %eax
+	cltq
+	movzbl	-12(%rbp,%rax), %eax
+	movb	%al, (%rdx)
+	addl	$1, -136(%rbp)
+.L1912:
+	movl	-136(%rbp), %eax
+	cmpl	-144(%rbp), %eax
+	jl	.L1913
+	subl	$1, -132(%rbp)
+	addl	$1, -140(%rbp)
+.L1896:
+	movl	-80(%rbp), %eax
+	imull	-76(%rbp), %eax
+	cmpl	%eax, -140(%rbp)
+	jl	.L1914
+	cmpl	$0, -68(%rbp)
+	je	.L1915
+	movl	$0, -136(%rbp)
+	jmp	.L1916
+.L1919:
+	movl	-136(%rbp), %eax
+	imull	-80(%rbp), %eax
+	movl	-144(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -116(%rbp)
+	movl	-76(%rbp), %eax
+	subl	$1, %eax
+	subl	-136(%rbp), %eax
+	imull	-80(%rbp), %eax
+	movl	-144(%rbp), %edx
+	imull	%edx, %eax
+	movl	%eax, -112(%rbp)
+	movl	-80(%rbp), %eax
+	imull	-144(%rbp), %eax
+	movl	%eax, -140(%rbp)
+	jmp	.L1917
+.L1918:
+	movl	-116(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -158(%rbp)
+	movl	-112(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-116(%rbp), %edx
+	movslq	%edx, %rcx
+	movq	-56(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movl	-112(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	-158(%rbp), %eax
+	movb	%al, (%rdx)
+	addl	$1, -116(%rbp)
+	addl	$1, -112(%rbp)
+	subl	$1, -140(%rbp)
+.L1917:
+	cmpl	$0, -140(%rbp)
+	jg	.L1918
+	addl	$1, -136(%rbp)
+.L1916:
+	movl	-136(%rbp), %eax
+	addl	%eax, %eax
+	cmpl	%eax, -76(%rbp)
+	jg	.L1919
+.L1915:
+	cmpq	$0, -48(%rbp)
+	je	.L1888
+	movq	-48(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+.L1888:
+	cmpl	$2, -144(%rbp)
+	jle	.L1920
+	movl	-156(%rbp), %eax
+	testl	%eax, %eax
+	jne	.L1920
+	movq	-56(%rbp), %rax
+	movq	%rax, -32(%rbp)
+	movl	$0, -140(%rbp)
+	jmp	.L1921
+.L1922:
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, -157(%rbp)
+	movq	-32(%rbp), %rax
+	movzbl	2(%rax), %edx
+	movq	-32(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-32(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movzbl	-157(%rbp), %eax
+	movb	%al, (%rdx)
+	movl	-144(%rbp), %eax
+	cltq
+	addq	%rax, -32(%rbp)
+	addl	$1, -140(%rbp)
+.L1921:
+	movl	-80(%rbp), %eax
+	imull	-76(%rbp), %eax
+	cmpl	%eax, -140(%rbp)
+	jl	.L1922
+.L1920:
+	cmpl	$0, -196(%rbp)
+	je	.L1923
+	movl	-196(%rbp), %eax
+	cmpl	-144(%rbp), %eax
+	je	.L1923
+	movl	-76(%rbp), %edi
+	movl	-80(%rbp), %ecx
+	movl	-196(%rbp), %edx
+	movl	-144(%rbp), %esi
+	movq	-56(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -56(%rbp)
+.L1923:
+	movl	$0, -84(%rbp)
+	movl	-84(%rbp), %eax
+	movl	%eax, -88(%rbp)
+	movl	-88(%rbp), %eax
+	movl	%eax, -92(%rbp)
+	movl	-92(%rbp), %eax
+	movl	%eax, -96(%rbp)
+	movl	-96(%rbp), %eax
+	movl	%eax, -100(%rbp)
+	movq	-56(%rbp), %rax
+.L1924:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L1925
+	call	__stack_chk_fail@PLT
+.L1925:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5045:
+	.size	stbi__tga_load, .-stbi__tga_load
+	.type	stbi__psd_test, @function
+stbi__psd_test:
+.LFB5046:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	cmpl	$943870035, %eax
+	sete	%al
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5046:
+	.size	stbi__psd_test, .-stbi__psd_test
+	.type	stbi__psd_decode_rle, @function
+stbi__psd_decode_rle:
+.LFB5047:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	%edx, -36(%rbp)
+	movl	$0, -12(%rbp)
+	jmp	.L1929
+.L1938:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -8(%rbp)
+	cmpl	$128, -8(%rbp)
+	je	.L1929
+	cmpl	$127, -8(%rbp)
+	jg	.L1930
+	addl	$1, -8(%rbp)
+	movl	-8(%rbp), %eax
+	cmpl	-4(%rbp), %eax
+	jle	.L1931
+	movl	$0, %eax
+	jmp	.L1932
+.L1931:
+	movl	-8(%rbp), %eax
+	addl	%eax, -12(%rbp)
+	jmp	.L1933
+.L1934:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-32(%rbp), %rdx
+	movb	%al, (%rdx)
+	addq	$4, -32(%rbp)
+	subl	$1, -8(%rbp)
+.L1933:
+	cmpl	$0, -8(%rbp)
+	jne	.L1934
+	jmp	.L1929
+.L1930:
+	cmpl	$128, -8(%rbp)
+	jle	.L1929
+	movl	$257, %eax
+	subl	-8(%rbp), %eax
+	movl	%eax, -8(%rbp)
+	movl	-8(%rbp), %eax
+	cmpl	-4(%rbp), %eax
+	jle	.L1935
+	movl	$0, %eax
+	jmp	.L1932
+.L1935:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -13(%rbp)
+	movl	-8(%rbp), %eax
+	addl	%eax, -12(%rbp)
+	jmp	.L1936
+.L1937:
+	movq	-32(%rbp), %rax
+	movzbl	-13(%rbp), %edx
+	movb	%dl, (%rax)
+	addq	$4, -32(%rbp)
+	subl	$1, -8(%rbp)
+.L1936:
+	cmpl	$0, -8(%rbp)
+	jne	.L1937
+.L1929:
+	movl	-36(%rbp), %eax
+	subl	-12(%rbp), %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$0, -4(%rbp)
+	jg	.L1938
+	movl	$1, %eax
+.L1932:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5047:
+	.size	stbi__psd_decode_rle, .-stbi__psd_decode_rle
+	.type	stbi__psd_load, @function
+stbi__psd_load:
+.LFB5048:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$176, %rsp
+	movq	%rdi, -136(%rbp)
+	movq	%rsi, -144(%rbp)
+	movq	%rdx, -152(%rbp)
+	movq	%rcx, -160(%rbp)
+	movl	%r8d, -164(%rbp)
+	movq	%r9, -176(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	cmpl	$943870035, %eax
+	je	.L1940
+	movl	$0, %eax
+	jmp	.L1941
+.L1940:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	cmpl	$1, %eax
+	je	.L1942
+	movl	$0, %eax
+	jmp	.L1941
+.L1942:
+	movq	-136(%rbp), %rax
+	movl	$6, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -112(%rbp)
+	cmpl	$0, -112(%rbp)
+	js	.L1943
+	cmpl	$16, -112(%rbp)
+	jle	.L1944
+.L1943:
+	movl	$0, %eax
+	jmp	.L1941
+.L1944:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, -108(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, -104(%rbp)
+	cmpl	$16777216, -108(%rbp)
+	jle	.L1945
+	movl	$0, %eax
+	jmp	.L1941
+.L1945:
+	cmpl	$16777216, -104(%rbp)
+	jle	.L1946
+	movl	$0, %eax
+	jmp	.L1941
+.L1946:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -100(%rbp)
+	cmpl	$8, -100(%rbp)
+	je	.L1947
+	cmpl	$16, -100(%rbp)
+	je	.L1947
+	movl	$0, %eax
+	jmp	.L1941
+.L1947:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	cmpl	$3, %eax
+	je	.L1948
+	movl	$0, %eax
+	jmp	.L1941
+.L1948:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, %edx
+	movq	-136(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, %edx
+	movq	-136(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, %edx
+	movq	-136(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -96(%rbp)
+	cmpl	$1, -96(%rbp)
+	jle	.L1949
+	movl	$0, %eax
+	jmp	.L1941
+.L1949:
+	movl	-108(%rbp), %edx
+	movl	-104(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %esi
+	movl	$4, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L1950
+	movl	$0, %eax
+	jmp	.L1941
+.L1950:
+	cmpl	$0, -96(%rbp)
+	jne	.L1951
+	cmpl	$16, -100(%rbp)
+	jne	.L1951
+	cmpl	$16, 16(%rbp)
+	jne	.L1951
+	movl	-108(%rbp), %edx
+	movl	-104(%rbp), %eax
+	movl	$0, %ecx
+	movl	%eax, %esi
+	movl	$8, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -64(%rbp)
+	movq	-176(%rbp), %rax
+	movl	$16, (%rax)
+	jmp	.L1952
+.L1951:
+	movl	-104(%rbp), %eax
+	imull	-108(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -64(%rbp)
+.L1952:
+	cmpq	$0, -64(%rbp)
+	jne	.L1953
+	movl	$0, %eax
+	jmp	.L1941
+.L1953:
+	movl	-104(%rbp), %eax
+	imull	-108(%rbp), %eax
+	movl	%eax, -92(%rbp)
+	cmpl	$0, -96(%rbp)
+	je	.L1954
+	movl	-108(%rbp), %eax
+	imull	-112(%rbp), %eax
+	leal	(%rax,%rax), %edx
+	movq	-136(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movl	$0, -120(%rbp)
+	jmp	.L1955
+.L1962:
+	movl	-120(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -56(%rbp)
+	movl	-120(%rbp), %eax
+	cmpl	-112(%rbp), %eax
+	jl	.L1956
+	movl	$0, -116(%rbp)
+	jmp	.L1957
+.L1960:
+	cmpl	$3, -120(%rbp)
+	jne	.L1958
+	movl	$-1, %edx
+	jmp	.L1959
+.L1958:
+	movl	$0, %edx
+.L1959:
+	movq	-56(%rbp), %rax
+	movb	%dl, (%rax)
+	addl	$1, -116(%rbp)
+	addq	$4, -56(%rbp)
+.L1957:
+	movl	-116(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1960
+	jmp	.L1961
+.L1956:
+	movl	-92(%rbp), %edx
+	movq	-56(%rbp), %rcx
+	movq	-136(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__psd_decode_rle
+	testl	%eax, %eax
+	jne	.L1961
+	movq	-64(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L1941
+.L1961:
+	addl	$1, -120(%rbp)
+.L1955:
+	cmpl	$3, -120(%rbp)
+	jle	.L1962
+	jmp	.L1963
+.L1954:
+	movl	$0, -120(%rbp)
+	jmp	.L1964
+.L1985:
+	movl	-120(%rbp), %eax
+	cmpl	-112(%rbp), %eax
+	jl	.L1965
+	cmpl	$16, -100(%rbp)
+	jne	.L1966
+	cmpl	$16, 16(%rbp)
+	jne	.L1966
+	movl	-120(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -48(%rbp)
+	cmpl	$3, -120(%rbp)
+	jne	.L1967
+	movl	$-1, %eax
+	jmp	.L1968
+.L1967:
+	movl	$0, %eax
+.L1968:
+	movw	%ax, -122(%rbp)
+	movl	$0, -116(%rbp)
+	jmp	.L1969
+.L1970:
+	movq	-48(%rbp), %rax
+	movzwl	-122(%rbp), %edx
+	movw	%dx, (%rax)
+	addl	$1, -116(%rbp)
+	addq	$8, -48(%rbp)
+.L1969:
+	movl	-116(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1970
+	jmp	.L1976
+.L1966:
+	movl	-120(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -40(%rbp)
+	cmpl	$3, -120(%rbp)
+	jne	.L1972
+	movl	$-1, %eax
+	jmp	.L1973
+.L1972:
+	movl	$0, %eax
+.L1973:
+	movb	%al, -123(%rbp)
+	movl	$0, -116(%rbp)
+	jmp	.L1974
+.L1975:
+	movq	-40(%rbp), %rax
+	movzbl	-123(%rbp), %edx
+	movb	%dl, (%rax)
+	addl	$1, -116(%rbp)
+	addq	$4, -40(%rbp)
+.L1974:
+	movl	-116(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1975
+	jmp	.L1976
+.L1965:
+	movq	-176(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1977
+	movl	-120(%rbp), %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -32(%rbp)
+	movl	$0, -116(%rbp)
+	jmp	.L1978
+.L1979:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movw	%dx, (%rax)
+	addl	$1, -116(%rbp)
+	addq	$8, -32(%rbp)
+.L1978:
+	movl	-116(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1979
+	jmp	.L1976
+.L1977:
+	movl	-120(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -24(%rbp)
+	cmpl	$16, -100(%rbp)
+	jne	.L1980
+	movl	$0, -116(%rbp)
+	jmp	.L1981
+.L1982:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	sarl	$8, %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movb	%dl, (%rax)
+	addl	$1, -116(%rbp)
+	addq	$4, -24(%rbp)
+.L1981:
+	movl	-116(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1982
+	jmp	.L1976
+.L1980:
+	movl	$0, -116(%rbp)
+	jmp	.L1983
+.L1984:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-24(%rbp), %rdx
+	movb	%al, (%rdx)
+	addl	$1, -116(%rbp)
+	addq	$4, -24(%rbp)
+.L1983:
+	movl	-116(%rbp), %eax
+	cmpl	-92(%rbp), %eax
+	jl	.L1984
+.L1976:
+	addl	$1, -120(%rbp)
+.L1964:
+	cmpl	$3, -120(%rbp)
+	jle	.L1985
+.L1963:
+	cmpl	$3, -112(%rbp)
+	jle	.L1986
+	movq	-176(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1987
+	movl	$0, -116(%rbp)
+	jmp	.L1988
+.L1990:
+	movl	-116(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	leaq	(%rax,%rax), %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	addq	$6, %rax
+	movzwl	(%rax), %eax
+	testw	%ax, %ax
+	je	.L1989
+	movq	-8(%rbp), %rax
+	addq	$6, %rax
+	movzwl	(%rax), %eax
+	cmpw	$-1, %ax
+	je	.L1989
+	movq	-8(%rbp), %rax
+	addq	$6, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	movss	.LC83(%rip), %xmm1
+	divss	%xmm1, %xmm0
+	movss	%xmm0, -76(%rbp)
+	movss	.LC3(%rip), %xmm0
+	divss	-76(%rbp), %xmm0
+	movss	%xmm0, -72(%rbp)
+	movss	.LC3(%rip), %xmm0
+	movaps	%xmm0, %xmm1
+	subss	-72(%rbp), %xmm1
+	movss	.LC83(%rip), %xmm0
+	mulss	%xmm1, %xmm0
+	movss	%xmm0, -68(%rbp)
+	movq	-8(%rbp), %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-72(%rbp), %xmm0
+	addss	-68(%rbp), %xmm0
+	cvttss2sil	%xmm0, %eax
+	movl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-8(%rbp), %rax
+	addq	$2, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-72(%rbp), %xmm0
+	addss	-68(%rbp), %xmm0
+	movq	-8(%rbp), %rax
+	addq	$2, %rax
+	cvttss2sil	%xmm0, %edx
+	movw	%dx, (%rax)
+	movq	-8(%rbp), %rax
+	addq	$4, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-72(%rbp), %xmm0
+	addss	-68(%rbp), %xmm0
+	movq	-8(%rbp), %rax
+	addq	$4, %rax
+	cvttss2sil	%xmm0, %edx
+	movw	%dx, (%rax)
+.L1989:
+	addl	$1, -116(%rbp)
+.L1988:
+	movl	-104(%rbp), %eax
+	imull	-108(%rbp), %eax
+	cmpl	%eax, -116(%rbp)
+	jl	.L1990
+	jmp	.L1986
+.L1987:
+	movl	$0, -116(%rbp)
+	jmp	.L1991
+.L1993:
+	movl	-116(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-64(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -16(%rbp)
+	movq	-16(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	je	.L1992
+	movq	-16(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	cmpb	$-1, %al
+	je	.L1992
+	movq	-16(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	movss	.LC6(%rip), %xmm1
+	divss	%xmm1, %xmm0
+	movss	%xmm0, -88(%rbp)
+	movss	.LC3(%rip), %xmm0
+	divss	-88(%rbp), %xmm0
+	movss	%xmm0, -84(%rbp)
+	movss	.LC3(%rip), %xmm0
+	movaps	%xmm0, %xmm1
+	subss	-84(%rbp), %xmm1
+	movss	.LC6(%rip), %xmm0
+	mulss	%xmm1, %xmm0
+	movss	%xmm0, -80(%rbp)
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-84(%rbp), %xmm0
+	addss	-80(%rbp), %xmm0
+	cvttss2sil	%xmm0, %eax
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-84(%rbp), %xmm0
+	addss	-80(%rbp), %xmm0
+	movq	-16(%rbp), %rax
+	addq	$1, %rax
+	cvttss2sil	%xmm0, %edx
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-84(%rbp), %xmm0
+	addss	-80(%rbp), %xmm0
+	movq	-16(%rbp), %rax
+	addq	$2, %rax
+	cvttss2sil	%xmm0, %edx
+	movb	%dl, (%rax)
+.L1992:
+	addl	$1, -116(%rbp)
+.L1991:
+	movl	-104(%rbp), %eax
+	imull	-108(%rbp), %eax
+	cmpl	%eax, -116(%rbp)
+	jl	.L1993
+.L1986:
+	cmpl	$0, -164(%rbp)
+	je	.L1994
+	cmpl	$4, -164(%rbp)
+	je	.L1994
+	movq	-176(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L1995
+	movl	-108(%rbp), %esi
+	movl	-104(%rbp), %ecx
+	movl	-164(%rbp), %edx
+	movq	-64(%rbp), %rax
+	movl	%esi, %r8d
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__convert_format16
+	movq	%rax, -64(%rbp)
+	jmp	.L1996
+.L1995:
+	movl	-108(%rbp), %esi
+	movl	-104(%rbp), %ecx
+	movl	-164(%rbp), %edx
+	movq	-64(%rbp), %rax
+	movl	%esi, %r8d
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -64(%rbp)
+.L1996:
+	cmpq	$0, -64(%rbp)
+	jne	.L1994
+	movq	-64(%rbp), %rax
+	jmp	.L1941
+.L1994:
+	cmpq	$0, -160(%rbp)
+	je	.L1997
+	movq	-160(%rbp), %rax
+	movl	$4, (%rax)
+.L1997:
+	movq	-152(%rbp), %rax
+	movl	-108(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-144(%rbp), %rax
+	movl	-104(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-64(%rbp), %rax
+.L1941:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5048:
+	.size	stbi__psd_load, .-stbi__psd_load
+	.type	stbi__pic_is4, @function
+stbi__pic_is4:
+.LFB5049:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L1999
+.L2002:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	-4(%rbp), %edx
+	movslq	%edx, %rcx
+	movq	-32(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rdx), %edx
+	cmpb	%dl, %al
+	je	.L2000
+	movl	$0, %eax
+	jmp	.L2001
+.L2000:
+	addl	$1, -4(%rbp)
+.L1999:
+	cmpl	$3, -4(%rbp)
+	jle	.L2002
+	movl	$1, %eax
+.L2001:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5049:
+	.size	stbi__pic_is4, .-stbi__pic_is4
+	.section	.rodata
+.LC84:
+	.string	"S\200\3664"
+.LC85:
+	.string	"PICT"
+	.text
+	.type	stbi__pic_test_core, @function
+stbi__pic_test_core:
+.LFB5050:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC84(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pic_is4
+	testl	%eax, %eax
+	jne	.L2004
+	movl	$0, %eax
+	jmp	.L2005
+.L2004:
+	movl	$0, -4(%rbp)
+	jmp	.L2006
+.L2007:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	addl	$1, -4(%rbp)
+.L2006:
+	cmpl	$83, -4(%rbp)
+	jle	.L2007
+	movq	-24(%rbp), %rax
+	leaq	.LC85(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pic_is4
+	testl	%eax, %eax
+	jne	.L2008
+	movl	$0, %eax
+	jmp	.L2005
+.L2008:
+	movl	$1, %eax
+.L2005:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5050:
+	.size	stbi__pic_test_core, .-stbi__pic_test_core
+	.type	stbi__readval, @function
+stbi__readval:
+.LFB5051:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$56, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -40(%rbp)
+	movl	%esi, -44(%rbp)
+	movq	%rdx, -56(%rbp)
+	movl	$128, -24(%rbp)
+	movl	$0, -20(%rbp)
+	jmp	.L2010
+.L2014:
+	movl	-44(%rbp), %eax
+	andl	-24(%rbp), %eax
+	testl	%eax, %eax
+	je	.L2011
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2012
+	movl	$0, %eax
+	jmp	.L2013
+.L2012:
+	movl	-20(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-56(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+.L2011:
+	addl	$1, -20(%rbp)
+	sarl	-24(%rbp)
+.L2010:
+	cmpl	$3, -20(%rbp)
+	jle	.L2014
+	movq	-56(%rbp), %rax
+.L2013:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5051:
+	.size	stbi__readval, .-stbi__readval
+	.type	stbi__copyval, @function
+stbi__copyval:
+.LFB5052:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, -20(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	$128, -8(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L2016
+.L2018:
+	movl	-20(%rbp), %eax
+	andl	-8(%rbp), %eax
+	testl	%eax, %eax
+	je	.L2017
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movl	-4(%rbp), %edx
+	movslq	%edx, %rcx
+	movq	-32(%rbp), %rdx
+	addq	%rcx, %rdx
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+.L2017:
+	addl	$1, -4(%rbp)
+	sarl	-8(%rbp)
+.L2016:
+	cmpl	$3, -4(%rbp)
+	jle	.L2018
+	nop
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5052:
+	.size	stbi__copyval, .-stbi__copyval
+	.type	stbi__pic_load_core, @function
+stbi__pic_load_core:
+.LFB5053:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$160, %rsp
+	movq	%rdi, -136(%rbp)
+	movl	%esi, -140(%rbp)
+	movl	%edx, -144(%rbp)
+	movq	%rcx, -152(%rbp)
+	movq	%r8, -160(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	$0, -116(%rbp)
+	movl	$0, -112(%rbp)
+.L2024:
+	cmpl	$10, -112(%rbp)
+	jne	.L2020
+	movl	$0, %eax
+	jmp	.L2063
+.L2020:
+	movl	-112(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -112(%rbp)
+	leaq	-48(%rbp), %rcx
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	addq	%rcx, %rax
+	movq	%rax, -64(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -76(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-64(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-64(%rbp), %rdx
+	movb	%al, 1(%rdx)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-64(%rbp), %rdx
+	movb	%al, 2(%rdx)
+	movq	-64(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %eax
+	orl	%eax, -116(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2022
+	movl	$0, %eax
+	jmp	.L2063
+.L2022:
+	movq	-64(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$8, %al
+	je	.L2023
+	movl	$0, %eax
+	jmp	.L2063
+.L2023:
+	cmpl	$0, -76(%rbp)
+	jne	.L2024
+	movl	-116(%rbp), %eax
+	andl	$16, %eax
+	testl	%eax, %eax
+	je	.L2025
+	movl	$4, %edx
+	jmp	.L2026
+.L2025:
+	movl	$3, %edx
+.L2026:
+	movq	-152(%rbp), %rax
+	movl	%edx, (%rax)
+	movl	$0, -108(%rbp)
+	jmp	.L2027
+.L2062:
+	movl	$0, -104(%rbp)
+	jmp	.L2028
+.L2061:
+	leaq	-48(%rbp), %rcx
+	movl	-104(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	addq	%rcx, %rax
+	movq	%rax, -56(%rbp)
+	movl	-108(%rbp), %eax
+	imull	-140(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-160(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -72(%rbp)
+	movq	-56(%rbp), %rax
+	movzbl	1(%rax), %eax
+	movzbl	%al, %eax
+	cmpl	$2, %eax
+	je	.L2029
+	cmpl	$2, %eax
+	jg	.L2030
+	testl	%eax, %eax
+	je	.L2031
+	cmpl	$1, %eax
+	je	.L2032
+.L2030:
+	movl	$0, %eax
+	jmp	.L2063
+.L2031:
+	movl	$0, -100(%rbp)
+	jmp	.L2033
+.L2035:
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-72(%rbp), %rdx
+	movq	-136(%rbp), %rax
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__readval
+	testq	%rax, %rax
+	jne	.L2034
+	movl	$0, %eax
+	jmp	.L2063
+.L2034:
+	addl	$1, -100(%rbp)
+	addq	$4, -72(%rbp)
+.L2033:
+	movl	-100(%rbp), %eax
+	cmpl	-140(%rbp), %eax
+	jl	.L2035
+	jmp	.L2036
+.L2032:
+	movl	-140(%rbp), %eax
+	movl	%eax, -96(%rbp)
+	jmp	.L2037
+.L2044:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -117(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2038
+	movl	$0, %eax
+	jmp	.L2063
+.L2038:
+	movzbl	-117(%rbp), %eax
+	cmpl	%eax, -96(%rbp)
+	jge	.L2040
+	movl	-96(%rbp), %eax
+	movb	%al, -117(%rbp)
+.L2040:
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %ecx
+	leaq	-12(%rbp), %rdx
+	movq	-136(%rbp), %rax
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__readval
+	testq	%rax, %rax
+	jne	.L2041
+	movl	$0, %eax
+	jmp	.L2063
+.L2041:
+	movl	$0, -92(%rbp)
+	jmp	.L2042
+.L2043:
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %eax
+	leaq	-12(%rbp), %rdx
+	movq	-72(%rbp), %rcx
+	movq	%rcx, %rsi
+	movl	%eax, %edi
+	call	stbi__copyval
+	addl	$1, -92(%rbp)
+	addq	$4, -72(%rbp)
+.L2042:
+	movzbl	-117(%rbp), %eax
+	cmpl	%eax, -92(%rbp)
+	jl	.L2043
+	movzbl	-117(%rbp), %eax
+	subl	%eax, -96(%rbp)
+.L2037:
+	cmpl	$0, -96(%rbp)
+	jg	.L2044
+	jmp	.L2036
+.L2029:
+	movl	-140(%rbp), %eax
+	movl	%eax, -88(%rbp)
+	jmp	.L2045
+.L2060:
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -84(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2046
+	movl	$0, %eax
+	jmp	.L2063
+.L2046:
+	cmpl	$127, -84(%rbp)
+	jle	.L2047
+	cmpl	$128, -84(%rbp)
+	jne	.L2048
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -84(%rbp)
+	jmp	.L2049
+.L2048:
+	subl	$127, -84(%rbp)
+.L2049:
+	movl	-84(%rbp), %eax
+	cmpl	-88(%rbp), %eax
+	jle	.L2050
+	movl	$0, %eax
+	jmp	.L2063
+.L2050:
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %ecx
+	leaq	-12(%rbp), %rdx
+	movq	-136(%rbp), %rax
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__readval
+	testq	%rax, %rax
+	jne	.L2052
+	movl	$0, %eax
+	jmp	.L2063
+.L2052:
+	movl	$0, -80(%rbp)
+	jmp	.L2053
+.L2054:
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %eax
+	leaq	-12(%rbp), %rdx
+	movq	-72(%rbp), %rcx
+	movq	%rcx, %rsi
+	movl	%eax, %edi
+	call	stbi__copyval
+	addl	$1, -80(%rbp)
+	addq	$4, -72(%rbp)
+.L2053:
+	movl	-80(%rbp), %eax
+	cmpl	-84(%rbp), %eax
+	jl	.L2054
+	jmp	.L2055
+.L2047:
+	addl	$1, -84(%rbp)
+	movl	-84(%rbp), %eax
+	cmpl	-88(%rbp), %eax
+	jle	.L2056
+	movl	$0, %eax
+	jmp	.L2063
+.L2056:
+	movl	$0, -80(%rbp)
+	jmp	.L2057
+.L2059:
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %ecx
+	movq	-72(%rbp), %rdx
+	movq	-136(%rbp), %rax
+	movl	%ecx, %esi
+	movq	%rax, %rdi
+	call	stbi__readval
+	testq	%rax, %rax
+	jne	.L2058
+	movl	$0, %eax
+	jmp	.L2063
+.L2058:
+	addl	$1, -80(%rbp)
+	addq	$4, -72(%rbp)
+.L2057:
+	movl	-80(%rbp), %eax
+	cmpl	-84(%rbp), %eax
+	jl	.L2059
+.L2055:
+	movl	-84(%rbp), %eax
+	subl	%eax, -88(%rbp)
+.L2045:
+	cmpl	$0, -88(%rbp)
+	jg	.L2060
+	nop
+.L2036:
+	addl	$1, -104(%rbp)
+.L2028:
+	movl	-104(%rbp), %eax
+	cmpl	-112(%rbp), %eax
+	jl	.L2061
+	addl	$1, -108(%rbp)
+.L2027:
+	movl	-108(%rbp), %eax
+	cmpl	-144(%rbp), %eax
+	jl	.L2062
+	movq	-160(%rbp), %rax
+.L2063:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2064
+	call	__stack_chk_fail@PLT
+.L2064:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5053:
+	.size	stbi__pic_load_core, .-stbi__pic_load_core
+	.type	stbi__pic_load, @function
+stbi__pic_load:
+.LFB5054:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$80, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movq	%rdx, -56(%rbp)
+	movq	%rcx, -64(%rbp)
+	movl	%r8d, -68(%rbp)
+	movq	%r9, -80(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	cmpq	$0, -64(%rbp)
+	jne	.L2066
+	leaq	-32(%rbp), %rax
+	movq	%rax, -64(%rbp)
+.L2066:
+	movl	$0, -28(%rbp)
+	jmp	.L2067
+.L2068:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	addl	$1, -28(%rbp)
+.L2067:
+	cmpl	$91, -28(%rbp)
+	jle	.L2068
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -24(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -20(%rbp)
+	cmpl	$16777216, -20(%rbp)
+	jle	.L2069
+	movl	$0, %eax
+	jmp	.L2077
+.L2069:
+	cmpl	$16777216, -24(%rbp)
+	jle	.L2071
+	movl	$0, %eax
+	jmp	.L2077
+.L2071:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2072
+	movl	$0, %eax
+	jmp	.L2077
+.L2072:
+	movl	-20(%rbp), %esi
+	movl	-24(%rbp), %eax
+	movl	$0, %ecx
+	movl	$4, %edx
+	movl	%eax, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L2073
+	movl	$0, %eax
+	jmp	.L2077
+.L2073:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	-20(%rbp), %esi
+	movl	-24(%rbp), %eax
+	movl	$0, %ecx
+	movl	$4, %edx
+	movl	%eax, %edi
+	call	stbi__malloc_mad3
+	movq	%rax, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	jne	.L2074
+	movl	$0, %eax
+	jmp	.L2077
+.L2074:
+	movl	-24(%rbp), %eax
+	imull	-20(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-16(%rbp), %rax
+	movl	$255, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movq	-16(%rbp), %rdi
+	movq	-64(%rbp), %rcx
+	movl	-20(%rbp), %edx
+	movl	-24(%rbp), %esi
+	movq	-40(%rbp), %rax
+	movq	%rdi, %r8
+	movq	%rax, %rdi
+	call	stbi__pic_load_core
+	testq	%rax, %rax
+	jne	.L2075
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	$0, -16(%rbp)
+.L2075:
+	movq	-48(%rbp), %rax
+	movl	-24(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-56(%rbp), %rax
+	movl	-20(%rbp), %edx
+	movl	%edx, (%rax)
+	cmpl	$0, -68(%rbp)
+	jne	.L2076
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, -68(%rbp)
+.L2076:
+	movl	-20(%rbp), %esi
+	movl	-24(%rbp), %ecx
+	movl	-68(%rbp), %edx
+	movq	-16(%rbp), %rax
+	movl	%esi, %r8d
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -16(%rbp)
+	movq	-16(%rbp), %rax
+.L2077:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2078
+	call	__stack_chk_fail@PLT
+.L2078:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5054:
+	.size	stbi__pic_load, .-stbi__pic_load
+	.type	stbi__pic_test, @function
+stbi__pic_test:
+.LFB5055:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pic_test_core
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5055:
+	.size	stbi__pic_test, .-stbi__pic_test
+	.type	stbi__gif_test_raw, @function
+stbi__gif_test_raw:
+.LFB5056:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$71, %al
+	jne	.L2082
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$73, %al
+	jne	.L2082
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$70, %al
+	jne	.L2082
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$56, %al
+	je	.L2083
+.L2082:
+	movl	$0, %eax
+	jmp	.L2084
+.L2083:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -4(%rbp)
+	cmpl	$57, -4(%rbp)
+	je	.L2085
+	cmpl	$55, -4(%rbp)
+	je	.L2085
+	movl	$0, %eax
+	jmp	.L2084
+.L2085:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$97, %al
+	je	.L2086
+	movl	$0, %eax
+	jmp	.L2084
+.L2086:
+	movl	$1, %eax
+.L2084:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5056:
+	.size	stbi__gif_test_raw, .-stbi__gif_test_raw
+	.type	stbi__gif_test, @function
+stbi__gif_test:
+.LFB5057:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__gif_test_raw
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5057:
+	.size	stbi__gif_test, .-stbi__gif_test
+	.type	stbi__gif_parse_colortable, @function
+stbi__gif_parse_colortable:
+.LFB5058:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$56, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movl	%edx, -52(%rbp)
+	movl	%ecx, -56(%rbp)
+	movl	$0, -20(%rbp)
+	jmp	.L2090
+.L2093:
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-48(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, 2(%rbx)
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-48(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, 1(%rbx)
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-48(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+	movl	-56(%rbp), %eax
+	cmpl	-20(%rbp), %eax
+	jne	.L2091
+	movl	$0, %ecx
+	jmp	.L2092
+.L2091:
+	movl	$-1, %ecx
+.L2092:
+	movl	-20(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-48(%rbp), %rax
+	addq	%rdx, %rax
+	movb	%cl, 3(%rax)
+	addl	$1, -20(%rbp)
+.L2090:
+	movl	-20(%rbp), %eax
+	cmpl	-52(%rbp), %eax
+	jl	.L2093
+	nop
+	nop
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5058:
+	.size	stbi__gif_parse_colortable, .-stbi__gif_parse_colortable
+	.section	.rodata
+.LC86:
+	.string	"not GIF"
+.LC87:
+	.string	""
+	.text
+	.type	stbi__gif_header, @function
+stbi__gif_header:
+.LFB5059:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movl	%ecx, -44(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$71, %al
+	jne	.L2095
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$73, %al
+	jne	.L2095
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$70, %al
+	jne	.L2095
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$56, %al
+	je	.L2096
+.L2095:
+	leaq	.LC86(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2097
+.L2096:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1(%rbp)
+	cmpb	$55, -1(%rbp)
+	je	.L2098
+	cmpb	$57, -1(%rbp)
+	je	.L2098
+	leaq	.LC86(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2097
+.L2098:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$97, %al
+	je	.L2099
+	leaq	.LC86(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2097
+.L2099:
+	leaq	.LC87(%rip), %rax
+	movq	%rax, %fs:stbi__g_failure_reason@tpoff
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-32(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movq	-32(%rbp), %rdx
+	movl	%eax, 4(%rdx)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 32(%rax)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 36(%rax)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, 40(%rax)
+	movq	-32(%rbp), %rax
+	movl	$-1, 44(%rax)
+	movq	-32(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16777216, %eax
+	jle	.L2100
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2097
+.L2100:
+	movq	-32(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	$16777216, %eax
+	jle	.L2101
+	leaq	.LC32(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2097
+.L2101:
+	cmpq	$0, -40(%rbp)
+	je	.L2102
+	movq	-40(%rbp), %rax
+	movl	$4, (%rax)
+.L2102:
+	cmpl	$0, -44(%rbp)
+	je	.L2103
+	movl	$1, %eax
+	jmp	.L2097
+.L2103:
+	movq	-32(%rbp), %rax
+	movl	32(%rax), %eax
+	andl	$128, %eax
+	testl	%eax, %eax
+	je	.L2104
+	movq	-32(%rbp), %rax
+	movl	32(%rax), %eax
+	andl	$7, %eax
+	movl	$2, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-32(%rbp), %rax
+	leaq	52(%rax), %rsi
+	movq	-24(%rbp), %rax
+	movl	$-1, %ecx
+	movq	%rax, %rdi
+	call	stbi__gif_parse_colortable
+.L2104:
+	movl	$1, %eax
+.L2097:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5059:
+	.size	stbi__gif_header, .-stbi__gif_header
+	.type	stbi__gif_info_raw, @function
+stbi__gif_info_raw:
+.LFB5060:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	$34928, %edi
+	call	stbi__malloc
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L2106
+	leaq	.LC37(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2107
+.L2106:
+	movq	-48(%rbp), %rdx
+	movq	-8(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movl	$1, %ecx
+	movq	%rax, %rdi
+	call	stbi__gif_header
+	testl	%eax, %eax
+	jne	.L2108
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2107
+.L2108:
+	cmpq	$0, -32(%rbp)
+	je	.L2109
+	movq	-8(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, (%rax)
+.L2109:
+	cmpq	$0, -40(%rbp)
+	je	.L2110
+	movq	-8(%rbp), %rax
+	movl	4(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, (%rax)
+.L2110:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$1, %eax
+.L2107:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5060:
+	.size	stbi__gif_info_raw, .-stbi__gif_info_raw
+	.type	stbi__out_gif_code, @function
+stbi__out_gif_code:
+.LFB5061:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movl	%esi, %eax
+	movw	%ax, -44(%rbp)
+	movzwl	-44(%rbp), %edx
+	movq	-40(%rbp), %rax
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movzwl	4(%rax,%rdx,4), %eax
+	testw	%ax, %ax
+	js	.L2112
+	movzwl	-44(%rbp), %edx
+	movq	-40(%rbp), %rax
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movzwl	4(%rax,%rdx,4), %eax
+	movzwl	%ax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__out_gif_code
+.L2112:
+	movq	-40(%rbp), %rax
+	movl	34912(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34904(%rax), %eax
+	cmpl	%eax, %edx
+	jge	.L2118
+	movq	-40(%rbp), %rax
+	movl	34908(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34912(%rax), %eax
+	addl	%edx, %eax
+	movl	%eax, -20(%rbp)
+	movq	-40(%rbp), %rax
+	movq	8(%rax), %rdx
+	movl	-20(%rbp), %eax
+	cltq
+	addq	%rdx, %rax
+	movq	%rax, -16(%rbp)
+	movq	-40(%rbp), %rax
+	movq	24(%rax), %rdx
+	movl	-20(%rbp), %eax
+	leal	3(%rax), %ecx
+	testl	%eax, %eax
+	cmovs	%ecx, %eax
+	sarl	$2, %eax
+	cltq
+	addq	%rdx, %rax
+	movb	$1, (%rax)
+	movq	-40(%rbp), %rax
+	movq	34872(%rax), %rdx
+	movzwl	-44(%rbp), %ecx
+	movq	-40(%rbp), %rax
+	movslq	%ecx, %rcx
+	addq	$524, %rcx
+	movzbl	7(%rax,%rcx,4), %eax
+	movzbl	%al, %eax
+	sall	$2, %eax
+	cltq
+	addq	%rdx, %rax
+	movq	%rax, -8(%rbp)
+	movq	-8(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	cmpb	$-128, %al
+	jbe	.L2115
+	movq	-8(%rbp), %rax
+	movzbl	2(%rax), %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+	movq	-16(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movzbl	1(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	2(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movzbl	(%rax), %eax
+	movb	%al, (%rdx)
+	movq	-16(%rbp), %rax
+	leaq	3(%rax), %rdx
+	movq	-8(%rbp), %rax
+	movzbl	3(%rax), %eax
+	movb	%al, (%rdx)
+.L2115:
+	movq	-40(%rbp), %rax
+	movl	34908(%rax), %eax
+	leal	4(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 34908(%rax)
+	movq	-40(%rbp), %rax
+	movl	34908(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34900(%rax), %eax
+	cmpl	%eax, %edx
+	jl	.L2111
+	movq	-40(%rbp), %rax
+	movl	34892(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 34908(%rax)
+	movq	-40(%rbp), %rax
+	movl	34912(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34884(%rax), %eax
+	addl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 34912(%rax)
+	jmp	.L2116
+.L2117:
+	movq	-40(%rbp), %rax
+	movl	34916(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34880(%rax), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 34884(%rax)
+	movq	-40(%rbp), %rax
+	movl	34896(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34884(%rax), %eax
+	sarl	%eax
+	addl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 34912(%rax)
+	movq	-40(%rbp), %rax
+	movl	34880(%rax), %eax
+	leal	-1(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, 34880(%rax)
+.L2116:
+	movq	-40(%rbp), %rax
+	movl	34912(%rax), %edx
+	movq	-40(%rbp), %rax
+	movl	34904(%rax), %eax
+	cmpl	%eax, %edx
+	jl	.L2111
+	movq	-40(%rbp), %rax
+	movl	34880(%rax), %eax
+	testl	%eax, %eax
+	jg	.L2117
+	jmp	.L2111
+.L2118:
+	nop
+.L2111:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5061:
+	.size	stbi__out_gif_code, .-stbi__out_gif_code
+	.type	stbi__process_gif_raster, @function
+stbi__process_gif_raster:
+.LFB5062:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$80, %rsp
+	movq	%rdi, -72(%rbp)
+	movq	%rsi, -80(%rbp)
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -53(%rbp)
+	cmpb	$12, -53(%rbp)
+	jbe	.L2120
+	movl	$0, %eax
+	jmp	.L2121
+.L2120:
+	movzbl	-53(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	movl	%eax, -16(%rbp)
+	movl	$1, -44(%rbp)
+	movzbl	-53(%rbp), %eax
+	addl	$1, %eax
+	movl	%eax, -40(%rbp)
+	movl	-40(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	subl	$1, %eax
+	movl	%eax, -36(%rbp)
+	movl	$0, -24(%rbp)
+	movl	$0, -20(%rbp)
+	movl	$0, -48(%rbp)
+	jmp	.L2122
+.L2123:
+	movq	-80(%rbp), %rax
+	movl	-48(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movw	$-1, 4(%rax,%rdx,4)
+	movl	-48(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-80(%rbp), %rax
+	movl	-48(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movb	%cl, 6(%rax,%rdx,4)
+	movl	-48(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-80(%rbp), %rax
+	movl	-48(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movb	%cl, 7(%rax,%rdx,4)
+	addl	$1, -48(%rbp)
+.L2122:
+	movl	-48(%rbp), %eax
+	cmpl	-16(%rbp), %eax
+	jl	.L2123
+	movl	-16(%rbp), %eax
+	addl	$2, %eax
+	movl	%eax, -32(%rbp)
+	movl	$-1, -28(%rbp)
+	movl	$0, -52(%rbp)
+.L2139:
+	movl	-20(%rbp), %eax
+	cmpl	-40(%rbp), %eax
+	jge	.L2124
+	cmpl	$0, -52(%rbp)
+	jne	.L2125
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -52(%rbp)
+	cmpl	$0, -52(%rbp)
+	jne	.L2125
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rax
+	jmp	.L2121
+.L2125:
+	subl	$1, -52(%rbp)
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movl	-20(%rbp), %eax
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	orl	%eax, -24(%rbp)
+	addl	$8, -20(%rbp)
+	jmp	.L2139
+.L2124:
+	movl	-24(%rbp), %eax
+	andl	-36(%rbp), %eax
+	movl	%eax, -12(%rbp)
+	movl	-40(%rbp), %eax
+	movl	%eax, %ecx
+	sarl	%cl, -24(%rbp)
+	movl	-40(%rbp), %eax
+	subl	%eax, -20(%rbp)
+	movl	-12(%rbp), %eax
+	cmpl	-16(%rbp), %eax
+	jne	.L2127
+	movzbl	-53(%rbp), %eax
+	addl	$1, %eax
+	movl	%eax, -40(%rbp)
+	movl	-40(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	subl	$1, %eax
+	movl	%eax, -36(%rbp)
+	movl	-16(%rbp), %eax
+	addl	$2, %eax
+	movl	%eax, -32(%rbp)
+	movl	$-1, -28(%rbp)
+	movl	$0, -44(%rbp)
+	jmp	.L2139
+.L2127:
+	movl	-16(%rbp), %eax
+	addl	$1, %eax
+	cmpl	%eax, -12(%rbp)
+	jne	.L2128
+	movl	-52(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	jmp	.L2129
+.L2130:
+	movl	-52(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+.L2129:
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -52(%rbp)
+	cmpl	$0, -52(%rbp)
+	jg	.L2130
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rax
+	jmp	.L2121
+.L2128:
+	movl	-12(%rbp), %eax
+	cmpl	-32(%rbp), %eax
+	jg	.L2131
+	cmpl	$0, -44(%rbp)
+	je	.L2132
+	movl	$0, %eax
+	jmp	.L2121
+.L2132:
+	cmpl	$0, -28(%rbp)
+	js	.L2133
+	movl	-32(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -32(%rbp)
+	cltq
+	addq	$524, %rax
+	leaq	0(,%rax,4), %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	addq	$4, %rax
+	movq	%rax, -8(%rbp)
+	cmpl	$8192, -32(%rbp)
+	jle	.L2134
+	movl	$0, %eax
+	jmp	.L2121
+.L2134:
+	movl	-28(%rbp), %eax
+	movl	%eax, %edx
+	movq	-8(%rbp), %rax
+	movw	%dx, (%rax)
+	movq	-80(%rbp), %rax
+	movl	-28(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movzbl	6(%rax,%rdx,4), %edx
+	movq	-8(%rbp), %rax
+	movb	%dl, 2(%rax)
+	movl	-12(%rbp), %eax
+	cmpl	-32(%rbp), %eax
+	jne	.L2135
+	movq	-8(%rbp), %rax
+	movzbl	2(%rax), %eax
+	jmp	.L2136
+.L2135:
+	movq	-80(%rbp), %rax
+	movl	-12(%rbp), %edx
+	movslq	%edx, %rdx
+	addq	$524, %rdx
+	movzbl	6(%rax,%rdx,4), %eax
+.L2136:
+	movq	-8(%rbp), %rdx
+	movb	%al, 3(%rdx)
+	jmp	.L2137
+.L2133:
+	movl	-12(%rbp), %eax
+	cmpl	-32(%rbp), %eax
+	jne	.L2137
+	movl	$0, %eax
+	jmp	.L2121
+.L2137:
+	movl	-12(%rbp), %eax
+	movzwl	%ax, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__out_gif_code
+	movl	-32(%rbp), %eax
+	andl	-36(%rbp), %eax
+	testl	%eax, %eax
+	jne	.L2138
+	cmpl	$4095, -32(%rbp)
+	jg	.L2138
+	addl	$1, -40(%rbp)
+	movl	-40(%rbp), %eax
+	movl	$1, %edx
+	movl	%eax, %ecx
+	sall	%cl, %edx
+	movl	%edx, %eax
+	subl	$1, %eax
+	movl	%eax, -36(%rbp)
+.L2138:
+	movl	-12(%rbp), %eax
+	movl	%eax, -28(%rbp)
+	jmp	.L2139
+.L2131:
+	movl	$0, %eax
+.L2121:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5062:
+	.size	stbi__process_gif_raster, .-stbi__process_gif_raster
+	.type	stbi__gif_load_next, @function
+stbi__gif_load_next:
+.LFB5063:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$112, %rsp
+	movq	%rdi, -72(%rbp)
+	movq	%rsi, -80(%rbp)
+	movq	%rdx, -88(%rbp)
+	movl	%ecx, -92(%rbp)
+	movq	%r8, -104(%rbp)
+	movl	$0, -48(%rbp)
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rax
+	testq	%rax, %rax
+	jne	.L2141
+	movq	-88(%rbp), %rdx
+	movq	-80(%rbp), %rsi
+	movq	-72(%rbp), %rax
+	movl	$0, %ecx
+	movq	%rax, %rdi
+	call	stbi__gif_header
+	testl	%eax, %eax
+	jne	.L2142
+	movl	$0, %eax
+	jmp	.L2143
+.L2142:
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	movl	$0, %ecx
+	movl	%eax, %esi
+	movl	$4, %edi
+	call	stbi__mad3sizes_valid
+	testl	%eax, %eax
+	jne	.L2144
+	movl	$0, %eax
+	jmp	.L2143
+.L2144:
+	movq	-80(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -40(%rbp)
+	movl	-40(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	-80(%rbp), %rdx
+	movq	%rax, 8(%rdx)
+	movl	-40(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	-80(%rbp), %rdx
+	movq	%rax, 16(%rdx)
+	movl	-40(%rbp), %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	-80(%rbp), %rdx
+	movq	%rax, 24(%rdx)
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rax
+	testq	%rax, %rax
+	je	.L2145
+	movq	-80(%rbp), %rax
+	movq	16(%rax), %rax
+	testq	%rax, %rax
+	je	.L2145
+	movq	-80(%rbp), %rax
+	movq	24(%rax), %rax
+	testq	%rax, %rax
+	jne	.L2146
+.L2145:
+	movl	$0, %eax
+	jmp	.L2143
+.L2146:
+	movl	-40(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	-40(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	movq	16(%rax), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	-40(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	movq	24(%rax), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	$1, -48(%rbp)
+	jmp	.L2147
+.L2141:
+	movq	-80(%rbp), %rax
+	movl	48(%rax), %eax
+	sarl	$2, %eax
+	andl	$7, %eax
+	movl	%eax, -52(%rbp)
+	movq	-80(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -40(%rbp)
+	cmpl	$3, -52(%rbp)
+	jne	.L2148
+	cmpq	$0, -104(%rbp)
+	jne	.L2148
+	movl	$2, -52(%rbp)
+.L2148:
+	cmpl	$3, -52(%rbp)
+	jne	.L2149
+	movl	$0, -44(%rbp)
+	jmp	.L2150
+.L2152:
+	movq	-80(%rbp), %rax
+	movq	24(%rax), %rdx
+	movl	-44(%rbp), %eax
+	cltq
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	je	.L2151
+	movl	-44(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-104(%rbp), %rax
+	leaq	(%rdx,%rax), %rcx
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rdx
+	movl	-44(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	addq	%rax, %rdx
+	movl	(%rcx), %eax
+	movl	%eax, (%rdx)
+.L2151:
+	addl	$1, -44(%rbp)
+.L2150:
+	movl	-44(%rbp), %eax
+	cmpl	-40(%rbp), %eax
+	jl	.L2152
+	jmp	.L2153
+.L2149:
+	cmpl	$2, -52(%rbp)
+	jne	.L2153
+	movl	$0, -44(%rbp)
+	jmp	.L2154
+.L2156:
+	movq	-80(%rbp), %rax
+	movq	24(%rax), %rdx
+	movl	-44(%rbp), %eax
+	cltq
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	je	.L2155
+	movq	-80(%rbp), %rax
+	movq	16(%rax), %rdx
+	movl	-44(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	leaq	(%rdx,%rax), %rcx
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rdx
+	movl	-44(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	addq	%rax, %rdx
+	movl	(%rcx), %eax
+	movl	%eax, (%rdx)
+.L2155:
+	addl	$1, -44(%rbp)
+.L2154:
+	movl	-44(%rbp), %eax
+	cmpl	-40(%rbp), %eax
+	jl	.L2156
+.L2153:
+	movq	-80(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rcx
+	movq	-80(%rbp), %rax
+	movq	16(%rax), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memcpy@PLT
+.L2147:
+	movq	-80(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-80(%rbp), %rax
+	movq	24(%rax), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+.L2182:
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -36(%rbp)
+	cmpl	$59, -36(%rbp)
+	je	.L2157
+	cmpl	$59, -36(%rbp)
+	jg	.L2158
+	cmpl	$33, -36(%rbp)
+	je	.L2159
+	cmpl	$44, -36(%rbp)
+	jne	.L2158
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -32(%rbp)
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -28(%rbp)
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -24(%rbp)
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, -20(%rbp)
+	movl	-32(%rbp), %edx
+	movl	-24(%rbp), %eax
+	addl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, %edx
+	jg	.L2160
+	movl	-28(%rbp), %edx
+	movl	-20(%rbp), %eax
+	addl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	%eax, %edx
+	jle	.L2161
+.L2160:
+	movl	$0, %eax
+	jmp	.L2143
+.L2161:
+	movq	-80(%rbp), %rax
+	movl	(%rax), %eax
+	leal	0(,%rax,4), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34916(%rax)
+	movl	-32(%rbp), %eax
+	leal	0(,%rax,4), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34892(%rax)
+	movq	-80(%rbp), %rax
+	movl	34916(%rax), %eax
+	imull	-28(%rbp), %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34896(%rax)
+	movq	-80(%rbp), %rax
+	movl	34892(%rax), %eax
+	movl	-24(%rbp), %edx
+	sall	$2, %edx
+	addl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34900(%rax)
+	movq	-80(%rbp), %rax
+	movl	34896(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	34916(%rax), %eax
+	imull	-20(%rbp), %eax
+	addl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34904(%rax)
+	movq	-80(%rbp), %rax
+	movl	34892(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34908(%rax)
+	movq	-80(%rbp), %rax
+	movl	34896(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34912(%rax)
+	cmpl	$0, -24(%rbp)
+	jne	.L2162
+	movq	-80(%rbp), %rax
+	movl	34904(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34912(%rax)
+.L2162:
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34888(%rax)
+	movq	-80(%rbp), %rax
+	movl	34888(%rax), %eax
+	andl	$64, %eax
+	testl	%eax, %eax
+	je	.L2163
+	movq	-80(%rbp), %rax
+	movl	34916(%rax), %eax
+	leal	0(,%rax,8), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34884(%rax)
+	movq	-80(%rbp), %rax
+	movl	$3, 34880(%rax)
+	jmp	.L2164
+.L2163:
+	movq	-80(%rbp), %rax
+	movl	34916(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34884(%rax)
+	movq	-80(%rbp), %rax
+	movl	$0, 34880(%rax)
+.L2164:
+	movq	-80(%rbp), %rax
+	movl	34888(%rax), %eax
+	andl	$128, %eax
+	testl	%eax, %eax
+	je	.L2165
+	movq	-80(%rbp), %rax
+	movl	48(%rax), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	je	.L2166
+	movq	-80(%rbp), %rax
+	movl	44(%rax), %eax
+	jmp	.L2167
+.L2166:
+	movl	$-1, %eax
+.L2167:
+	movq	-80(%rbp), %rdx
+	movl	34888(%rdx), %edx
+	andl	$7, %edx
+	movl	$2, %esi
+	movl	%edx, %ecx
+	sall	%cl, %esi
+	movl	%esi, %edx
+	movq	-80(%rbp), %rcx
+	leaq	1076(%rcx), %rsi
+	movq	-72(%rbp), %rdi
+	movl	%eax, %ecx
+	call	stbi__gif_parse_colortable
+	movq	-80(%rbp), %rax
+	leaq	1076(%rax), %rdx
+	movq	-80(%rbp), %rax
+	movq	%rdx, 34872(%rax)
+	jmp	.L2168
+.L2165:
+	movq	-80(%rbp), %rax
+	movl	32(%rax), %eax
+	andl	$128, %eax
+	testl	%eax, %eax
+	je	.L2169
+	movq	-80(%rbp), %rax
+	leaq	52(%rax), %rdx
+	movq	-80(%rbp), %rax
+	movq	%rdx, 34872(%rax)
+	jmp	.L2168
+.L2169:
+	movl	$0, %eax
+	jmp	.L2143
+.L2168:
+	movq	-80(%rbp), %rdx
+	movq	-72(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__process_gif_raster
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L2170
+	movl	$0, %eax
+	jmp	.L2143
+.L2170:
+	movq	-80(%rbp), %rax
+	movl	(%rax), %edx
+	movq	-80(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%edx, %eax
+	movl	%eax, -40(%rbp)
+	cmpl	$0, -48(%rbp)
+	je	.L2171
+	movq	-80(%rbp), %rax
+	movl	36(%rax), %eax
+	testl	%eax, %eax
+	jle	.L2171
+	movl	$0, -44(%rbp)
+	jmp	.L2172
+.L2174:
+	movq	-80(%rbp), %rax
+	movq	24(%rax), %rdx
+	movl	-44(%rbp), %eax
+	cltq
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	jne	.L2173
+	movq	-80(%rbp), %rax
+	movl	36(%rax), %edx
+	movq	-80(%rbp), %rax
+	movslq	%edx, %rdx
+	movb	$-1, 55(%rax,%rdx,4)
+	movq	-80(%rbp), %rax
+	movl	36(%rax), %eax
+	cltq
+	addq	$12, %rax
+	leaq	0(,%rax,4), %rdx
+	movq	-80(%rbp), %rax
+	addq	%rdx, %rax
+	leaq	4(%rax), %rcx
+	movq	-80(%rbp), %rax
+	movq	8(%rax), %rdx
+	movl	-44(%rbp), %eax
+	sall	$2, %eax
+	cltq
+	addq	%rax, %rdx
+	movl	(%rcx), %eax
+	movl	%eax, (%rdx)
+.L2173:
+	addl	$1, -44(%rbp)
+.L2172:
+	movl	-44(%rbp), %eax
+	cmpl	-40(%rbp), %eax
+	jl	.L2174
+.L2171:
+	movq	-8(%rbp), %rax
+	jmp	.L2143
+.L2159:
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -16(%rbp)
+	cmpl	$249, -16(%rbp)
+	jne	.L2180
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	cmpl	$4, -12(%rbp)
+	jne	.L2176
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 48(%rax)
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16le
+	movl	%eax, %edx
+	movl	%edx, %eax
+	sall	$2, %eax
+	addl	%edx, %eax
+	addl	%eax, %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 34920(%rax)
+	movq	-80(%rbp), %rax
+	movl	44(%rax), %eax
+	testl	%eax, %eax
+	js	.L2177
+	movq	-80(%rbp), %rax
+	movl	44(%rax), %edx
+	movq	-80(%rbp), %rax
+	movslq	%edx, %rdx
+	movb	$-1, 55(%rax,%rdx,4)
+.L2177:
+	movq	-80(%rbp), %rax
+	movl	48(%rax), %eax
+	andl	$1, %eax
+	testl	%eax, %eax
+	je	.L2178
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, 44(%rax)
+	movq	-80(%rbp), %rax
+	movl	44(%rax), %eax
+	testl	%eax, %eax
+	js	.L2180
+	movq	-80(%rbp), %rax
+	movl	44(%rax), %edx
+	movq	-80(%rbp), %rax
+	movslq	%edx, %rdx
+	movb	$0, 55(%rax,%rdx,4)
+	jmp	.L2180
+.L2178:
+	movq	-72(%rbp), %rax
+	movl	$1, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-80(%rbp), %rax
+	movl	$-1, 44(%rax)
+	jmp	.L2180
+.L2176:
+	movl	-12(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	jmp	.L2179
+.L2181:
+	movl	-12(%rbp), %edx
+	movq	-72(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+.L2180:
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -12(%rbp)
+	cmpl	$0, -12(%rbp)
+	jne	.L2181
+	jmp	.L2179
+.L2157:
+	movq	-72(%rbp), %rax
+	jmp	.L2143
+.L2158:
+	movl	$0, %eax
+	jmp	.L2143
+.L2179:
+	jmp	.L2182
+.L2143:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5063:
+	.size	stbi__gif_load_next, .-stbi__gif_load_next
+	.type	stbi__load_gif_main_outofmem, @function
+stbi__load_gif_main_outofmem:
+.LFB5064:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	-8(%rbp), %rax
+	movq	8(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+	movq	24(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-8(%rbp), %rax
+	movq	16(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	cmpq	$0, -16(%rbp)
+	je	.L2184
+	movq	-16(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+.L2184:
+	cmpq	$0, -24(%rbp)
+	je	.L2185
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	je	.L2185
+	movq	-24(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+.L2185:
+	movl	$0, %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5064:
+	.size	stbi__load_gif_main_outofmem, .-stbi__load_gif_main_outofmem
+	.type	stbi__load_gif_main, @function
+stbi__load_gif_main:
+.LFB5065:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$35056, %rsp
+	movq	%rdi, -35016(%rbp)
+	movq	%rsi, -35024(%rbp)
+	movq	%rdx, -35032(%rbp)
+	movq	%rcx, -35040(%rbp)
+	movq	%r8, -35048(%rbp)
+	movq	%r9, -35056(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-35016(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__gif_test
+	testl	%eax, %eax
+	je	.L2188
+	movl	$0, -35000(%rbp)
+	movq	$0, -34984(%rbp)
+	movq	$0, -34976(%rbp)
+	movq	$0, -34968(%rbp)
+	movl	$0, -34996(%rbp)
+	movl	$0, -34992(%rbp)
+	leaq	-34944(%rbp), %rax
+	movl	$34928, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	cmpq	$0, -35024(%rbp)
+	je	.L2200
+	movq	-35024(%rbp), %rax
+	movq	$0, (%rax)
+.L2200:
+	movq	-34968(%rbp), %rdi
+	movl	16(%rbp), %ecx
+	movq	-35056(%rbp), %rdx
+	leaq	-34944(%rbp), %rsi
+	movq	-35016(%rbp), %rax
+	movq	%rdi, %r8
+	movq	%rax, %rdi
+	call	stbi__gif_load_next
+	movq	%rax, -34984(%rbp)
+	movq	-34984(%rbp), %rax
+	cmpq	-35016(%rbp), %rax
+	jne	.L2190
+	movq	$0, -34984(%rbp)
+.L2190:
+	cmpq	$0, -34984(%rbp)
+	je	.L2191
+	movl	-34944(%rbp), %edx
+	movq	-35032(%rbp), %rax
+	movl	%edx, (%rax)
+	movl	-34940(%rbp), %edx
+	movq	-35040(%rbp), %rax
+	movl	%edx, (%rax)
+	addl	$1, -35000(%rbp)
+	movl	-34944(%rbp), %edx
+	movl	-34940(%rbp), %eax
+	imull	%edx, %eax
+	sall	$2, %eax
+	movl	%eax, -34988(%rbp)
+	cmpq	$0, -34976(%rbp)
+	je	.L2192
+	movl	-35000(%rbp), %eax
+	imull	-34988(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-34976(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	realloc@PLT
+	movq	%rax, -34960(%rbp)
+	cmpq	$0, -34960(%rbp)
+	jne	.L2193
+	movq	-35024(%rbp), %rdx
+	movq	-34976(%rbp), %rcx
+	leaq	-34944(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__load_gif_main_outofmem
+	jmp	.L2202
+.L2193:
+	movq	-34960(%rbp), %rax
+	movq	%rax, -34976(%rbp)
+	movl	-35000(%rbp), %eax
+	imull	-34988(%rbp), %eax
+	movl	%eax, -34996(%rbp)
+	cmpq	$0, -35024(%rbp)
+	je	.L2195
+	movl	-35000(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-35024(%rbp), %rax
+	movq	(%rax), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	realloc@PLT
+	movq	%rax, -34952(%rbp)
+	cmpq	$0, -34952(%rbp)
+	jne	.L2196
+	movq	-35024(%rbp), %rdx
+	movq	-34976(%rbp), %rcx
+	leaq	-34944(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__load_gif_main_outofmem
+	jmp	.L2202
+.L2196:
+	movq	-35024(%rbp), %rax
+	movq	-34952(%rbp), %rdx
+	movq	%rdx, (%rax)
+	movl	-35000(%rbp), %eax
+	cltq
+	sall	$2, %eax
+	movl	%eax, -34992(%rbp)
+	jmp	.L2195
+.L2192:
+	movl	-35000(%rbp), %eax
+	imull	-34988(%rbp), %eax
+	cltq
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	%rax, -34976(%rbp)
+	cmpq	$0, -34976(%rbp)
+	jne	.L2197
+	movq	-35024(%rbp), %rdx
+	movq	-34976(%rbp), %rcx
+	leaq	-34944(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__load_gif_main_outofmem
+	jmp	.L2202
+.L2197:
+	movl	-35000(%rbp), %eax
+	imull	-34988(%rbp), %eax
+	movl	%eax, -34996(%rbp)
+	cmpq	$0, -35024(%rbp)
+	je	.L2195
+	movl	-35000(%rbp), %eax
+	cltq
+	salq	$2, %rax
+	movq	%rax, %rdi
+	call	stbi__malloc
+	movq	-35024(%rbp), %rdx
+	movq	%rax, (%rdx)
+	movq	-35024(%rbp), %rax
+	movq	(%rax), %rax
+	testq	%rax, %rax
+	jne	.L2198
+	movq	-35024(%rbp), %rdx
+	movq	-34976(%rbp), %rcx
+	leaq	-34944(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__load_gif_main_outofmem
+	jmp	.L2202
+.L2198:
+	movl	-35000(%rbp), %eax
+	cltq
+	sall	$2, %eax
+	movl	%eax, -34992(%rbp)
+.L2195:
+	movl	-34988(%rbp), %eax
+	movslq	%eax, %rdx
+	movl	-35000(%rbp), %eax
+	subl	$1, %eax
+	imull	-34988(%rbp), %eax
+	movslq	%eax, %rcx
+	movq	-34976(%rbp), %rax
+	addq	%rax, %rcx
+	movq	-34984(%rbp), %rax
+	movq	%rax, %rsi
+	movq	%rcx, %rdi
+	call	memcpy@PLT
+	cmpl	$1, -35000(%rbp)
+	jle	.L2199
+	movl	-34988(%rbp), %eax
+	addl	%eax, %eax
+	cltq
+	negq	%rax
+	movq	%rax, %rdx
+	movq	-34976(%rbp), %rax
+	addq	%rdx, %rax
+	movq	%rax, -34968(%rbp)
+.L2199:
+	cmpq	$0, -35024(%rbp)
+	je	.L2191
+	movq	-35024(%rbp), %rax
+	movq	(%rax), %rax
+	movl	-35000(%rbp), %edx
+	subl	$1, %edx
+	movl	%edx, %edx
+	salq	$2, %rdx
+	addq	%rax, %rdx
+	movl	-24(%rbp), %eax
+	movl	%eax, (%rdx)
+.L2191:
+	cmpq	$0, -34984(%rbp)
+	jne	.L2200
+	movq	-34936(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-34920(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-34928(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	cmpl	$0, 16(%rbp)
+	je	.L2201
+	cmpl	$4, 16(%rbp)
+	je	.L2201
+	movl	-34940(%rbp), %eax
+	movl	%eax, %esi
+	movl	-34944(%rbp), %eax
+	imull	-35000(%rbp), %eax
+	movl	%eax, %ecx
+	movl	16(%rbp), %edx
+	movq	-34976(%rbp), %rax
+	movl	%esi, %r8d
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -34976(%rbp)
+.L2201:
+	movq	-35048(%rbp), %rax
+	movl	-35000(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-34976(%rbp), %rax
+	jmp	.L2202
+.L2188:
+	movl	$0, %eax
+.L2202:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2203
+	call	__stack_chk_fail@PLT
+.L2203:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5065:
+	.size	stbi__load_gif_main, .-stbi__load_gif_main
+	.type	stbi__gif_load, @function
+stbi__gif_load:
+.LFB5066:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$35008, %rsp
+	movq	%rdi, -34968(%rbp)
+	movq	%rsi, -34976(%rbp)
+	movq	%rdx, -34984(%rbp)
+	movq	%rcx, -34992(%rbp)
+	movl	%r8d, -34996(%rbp)
+	movq	%r9, -35008(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	$0, -34952(%rbp)
+	leaq	-34944(%rbp), %rax
+	movl	$34928, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	memset@PLT
+	movl	-34996(%rbp), %ecx
+	movq	-34992(%rbp), %rdx
+	leaq	-34944(%rbp), %rsi
+	movq	-34968(%rbp), %rax
+	movl	$0, %r8d
+	movq	%rax, %rdi
+	call	stbi__gif_load_next
+	movq	%rax, -34952(%rbp)
+	movq	-34952(%rbp), %rax
+	cmpq	-34968(%rbp), %rax
+	jne	.L2205
+	movq	$0, -34952(%rbp)
+.L2205:
+	cmpq	$0, -34952(%rbp)
+	je	.L2206
+	movl	-34944(%rbp), %edx
+	movq	-34976(%rbp), %rax
+	movl	%edx, (%rax)
+	movl	-34940(%rbp), %edx
+	movq	-34984(%rbp), %rax
+	movl	%edx, (%rax)
+	cmpl	$0, -34996(%rbp)
+	je	.L2207
+	cmpl	$4, -34996(%rbp)
+	je	.L2207
+	movl	-34940(%rbp), %eax
+	movl	%eax, %esi
+	movl	-34944(%rbp), %eax
+	movl	%eax, %ecx
+	movl	-34996(%rbp), %edx
+	movq	-34952(%rbp), %rax
+	movl	%esi, %r8d
+	movl	$4, %esi
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -34952(%rbp)
+	jmp	.L2207
+.L2206:
+	movq	-34936(%rbp), %rax
+	testq	%rax, %rax
+	je	.L2207
+	movq	-34936(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+.L2207:
+	movq	-34920(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-34928(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-34952(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2209
+	call	__stack_chk_fail@PLT
+.L2209:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5066:
+	.size	stbi__gif_load, .-stbi__gif_load
+	.type	stbi__gif_info, @function
+stbi__gif_info:
+.LFB5067:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	%rcx, -32(%rbp)
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__gif_info_raw
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5067:
+	.size	stbi__gif_info, .-stbi__gif_info
+	.type	stbi__hdr_test_core, @function
+stbi__hdr_test_core:
+.LFB5068:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L2213
+.L2216:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %edx
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rcx
+	movq	-32(%rbp), %rax
+	addq	%rcx, %rax
+	movzbl	(%rax), %eax
+	movsbl	%al, %eax
+	cmpl	%eax, %edx
+	je	.L2214
+	movl	$0, %eax
+	jmp	.L2215
+.L2214:
+	addl	$1, -4(%rbp)
+.L2213:
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	jne	.L2216
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$1, %eax
+.L2215:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5068:
+	.size	stbi__hdr_test_core, .-stbi__hdr_test_core
+	.section	.rodata
+.LC88:
+	.string	"#?RADIANCE\n"
+.LC89:
+	.string	"#?RGBE\n"
+	.text
+	.type	stbi__hdr_test, @function
+stbi__hdr_test:
+.LFB5069:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC88(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_test_core
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	cmpl	$0, -4(%rbp)
+	jne	.L2218
+	movq	-24(%rbp), %rax
+	leaq	.LC89(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_test_core
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+.L2218:
+	movl	-4(%rbp), %eax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5069:
+	.size	stbi__hdr_test, .-stbi__hdr_test
+	.type	stbi__hdr_gettoken, @function
+stbi__hdr_gettoken:
+.LFB5070:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	$0, -4(%rbp)
+	movb	$0, -5(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -5(%rbp)
+	jmp	.L2221
+.L2226:
+	movl	-4(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -4(%rbp)
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	-5(%rbp), %eax
+	movb	%al, (%rdx)
+	cmpl	$1023, -4(%rbp)
+	jne	.L2222
+	nop
+.L2224:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	jne	.L2228
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	cmpb	$10, %al
+	jne	.L2224
+	jmp	.L2228
+.L2222:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -5(%rbp)
+.L2221:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	jne	.L2225
+	cmpb	$10, -5(%rbp)
+	jne	.L2226
+	jmp	.L2225
+.L2228:
+	nop
+.L2225:
+	movl	-4(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	-32(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$0, (%rax)
+	movq	-32(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5070:
+	.size	stbi__hdr_gettoken, .-stbi__hdr_gettoken
+	.type	stbi__hdr_convert, @function
+stbi__hdr_convert:
+.LFB5071:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	%edx, -36(%rbp)
+	movq	-32(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	je	.L2230
+	movq	-32(%rbp), %rax
+	addq	$3, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	leal	-136(%rax), %edx
+	movq	.LC90(%rip), %rax
+	movl	%edx, %edi
+	movq	%rax, %xmm0
+	call	ldexp@PLT
+	cvtsd2ss	%xmm0, %xmm0
+	movss	%xmm0, -4(%rbp)
+	cmpl	$2, -36(%rbp)
+	jg	.L2231
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %edx
+	movq	-32(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%eax, %edx
+	movq	-32(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	addl	%edx, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-4(%rbp), %xmm0
+	movss	.LC91(%rip), %xmm1
+	divss	%xmm1, %xmm0
+	movq	-24(%rbp), %rax
+	movss	%xmm0, (%rax)
+	jmp	.L2232
+.L2231:
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	mulss	-4(%rbp), %xmm0
+	movq	-24(%rbp), %rax
+	movss	%xmm0, (%rax)
+	movq	-32(%rbp), %rax
+	addq	$1, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	mulss	-4(%rbp), %xmm0
+	movss	%xmm0, (%rax)
+	movq	-32(%rbp), %rax
+	addq	$2, %rax
+	movzbl	(%rax), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2ssl	%eax, %xmm0
+	movq	-24(%rbp), %rax
+	addq	$8, %rax
+	mulss	-4(%rbp), %xmm0
+	movss	%xmm0, (%rax)
+.L2232:
+	cmpl	$2, -36(%rbp)
+	jne	.L2233
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movss	.LC3(%rip), %xmm0
+	movss	%xmm0, (%rax)
+.L2233:
+	cmpl	$4, -36(%rbp)
+	jne	.L2239
+	movq	-24(%rbp), %rax
+	addq	$12, %rax
+	movss	.LC3(%rip), %xmm0
+	movss	%xmm0, (%rax)
+	jmp	.L2239
+.L2230:
+	cmpl	$4, -36(%rbp)
+	je	.L2235
+	cmpl	$4, -36(%rbp)
+	jg	.L2239
+	cmpl	$3, -36(%rbp)
+	je	.L2236
+	cmpl	$3, -36(%rbp)
+	jg	.L2239
+	cmpl	$1, -36(%rbp)
+	je	.L2237
+	cmpl	$2, -36(%rbp)
+	je	.L2238
+	jmp	.L2239
+.L2235:
+	movq	-24(%rbp), %rax
+	addq	$12, %rax
+	movss	.LC3(%rip), %xmm0
+	movss	%xmm0, (%rax)
+.L2236:
+	movq	-24(%rbp), %rax
+	leaq	8(%rax), %rdx
+	pxor	%xmm0, %xmm0
+	movss	%xmm0, (%rdx)
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movss	(%rdx), %xmm0
+	movss	%xmm0, (%rax)
+	movss	(%rax), %xmm0
+	movq	-24(%rbp), %rax
+	movss	%xmm0, (%rax)
+	jmp	.L2234
+.L2238:
+	movq	-24(%rbp), %rax
+	addq	$4, %rax
+	movss	.LC3(%rip), %xmm0
+	movss	%xmm0, (%rax)
+.L2237:
+	movq	-24(%rbp), %rax
+	pxor	%xmm0, %xmm0
+	movss	%xmm0, (%rax)
+	nop
+.L2234:
+.L2239:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5071:
+	.size	stbi__hdr_convert, .-stbi__hdr_convert
+	.section	.rodata
+.LC92:
+	.string	"#?RADIANCE"
+.LC93:
+	.string	"#?RGBE"
+.LC94:
+	.string	"FORMAT=32-bit_rle_rgbe"
+.LC95:
+	.string	"-Y "
+.LC96:
+	.string	"+X "
+	.text
+	.type	stbi__hdr_load, @function
+stbi__hdr_load:
+.LFB5072:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$1192, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -1160(%rbp)
+	movq	%rsi, -1168(%rbp)
+	movq	%rdx, -1176(%rbp)
+	movq	%rcx, -1184(%rbp)
+	movl	%r8d, -1188(%rbp)
+	movq	%r9, -1200(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movl	$0, -1140(%rbp)
+	leaq	-1056(%rbp), %rdx
+	movq	-1160(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_gettoken
+	movq	%rax, -1080(%rbp)
+	movq	-1080(%rbp), %rax
+	leaq	.LC92(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	strcmp@PLT
+	testl	%eax, %eax
+	je	.L2246
+	movq	-1080(%rbp), %rax
+	leaq	.LC93(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	strcmp@PLT
+	testl	%eax, %eax
+	je	.L2246
+	movl	$0, %eax
+	jmp	.L2286
+.L2246:
+	leaq	-1056(%rbp), %rdx
+	movq	-1160(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_gettoken
+	movq	%rax, -1096(%rbp)
+	movq	-1096(%rbp), %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	je	.L2289
+	movq	-1096(%rbp), %rax
+	leaq	.LC94(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	strcmp@PLT
+	testl	%eax, %eax
+	jne	.L2246
+	movl	$1, -1140(%rbp)
+	jmp	.L2246
+.L2289:
+	nop
+	cmpl	$0, -1140(%rbp)
+	jne	.L2247
+	movl	$0, %eax
+	jmp	.L2286
+.L2247:
+	leaq	-1056(%rbp), %rdx
+	movq	-1160(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_gettoken
+	movq	%rax, -1096(%rbp)
+	movq	-1096(%rbp), %rax
+	movl	$3, %edx
+	leaq	.LC95(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	strncmp@PLT
+	testl	%eax, %eax
+	je	.L2248
+	movl	$0, %eax
+	jmp	.L2286
+.L2248:
+	movq	-1096(%rbp), %rax
+	addq	$3, %rax
+	movq	%rax, -1096(%rbp)
+	movq	-1096(%rbp), %rax
+	leaq	-1096(%rbp), %rcx
+	movl	$10, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	strtol@PLT
+	movl	%eax, -1120(%rbp)
+	jmp	.L2249
+.L2250:
+	movq	-1096(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, -1096(%rbp)
+.L2249:
+	movq	-1096(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$32, %al
+	je	.L2250
+	movq	-1096(%rbp), %rax
+	movl	$3, %edx
+	leaq	.LC96(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	strncmp@PLT
+	testl	%eax, %eax
+	je	.L2251
+	movl	$0, %eax
+	jmp	.L2286
+.L2251:
+	movq	-1096(%rbp), %rax
+	addq	$3, %rax
+	movq	%rax, -1096(%rbp)
+	movq	-1096(%rbp), %rax
+	movl	$10, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	strtol@PLT
+	movl	%eax, -1116(%rbp)
+	cmpl	$16777216, -1120(%rbp)
+	jle	.L2252
+	movl	$0, %eax
+	jmp	.L2286
+.L2252:
+	cmpl	$16777216, -1116(%rbp)
+	jle	.L2253
+	movl	$0, %eax
+	jmp	.L2286
+.L2253:
+	movq	-1168(%rbp), %rax
+	movl	-1116(%rbp), %edx
+	movl	%edx, (%rax)
+	movq	-1176(%rbp), %rax
+	movl	-1120(%rbp), %edx
+	movl	%edx, (%rax)
+	cmpq	$0, -1184(%rbp)
+	je	.L2254
+	movq	-1184(%rbp), %rax
+	movl	$3, (%rax)
+.L2254:
+	cmpl	$0, -1188(%rbp)
+	jne	.L2255
+	movl	$3, -1188(%rbp)
+.L2255:
+	movl	-1188(%rbp), %edx
+	movl	-1120(%rbp), %esi
+	movl	-1116(%rbp), %eax
+	movl	$0, %r8d
+	movl	$4, %ecx
+	movl	%eax, %edi
+	call	stbi__mad4sizes_valid
+	testl	%eax, %eax
+	jne	.L2256
+	movl	$0, %eax
+	jmp	.L2286
+.L2256:
+	movl	-1188(%rbp), %edx
+	movl	-1120(%rbp), %esi
+	movl	-1116(%rbp), %eax
+	movl	$0, %r8d
+	movl	$4, %ecx
+	movl	%eax, %edi
+	call	stbi__malloc_mad4
+	movq	%rax, -1072(%rbp)
+	cmpq	$0, -1072(%rbp)
+	jne	.L2257
+	movl	$0, %eax
+	jmp	.L2286
+.L2257:
+	cmpl	$7, -1116(%rbp)
+	jle	.L2258
+	cmpl	$32767, -1116(%rbp)
+	jle	.L2259
+.L2258:
+	movl	$0, -1132(%rbp)
+	jmp	.L2260
+.L2263:
+	movl	$0, -1136(%rbp)
+	jmp	.L2261
+.L2290:
+	nop
+.L2262:
+	leaq	-1060(%rbp), %rcx
+	movq	-1160(%rbp), %rax
+	movl	$4, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__getn
+	movl	-1132(%rbp), %eax
+	imull	-1116(%rbp), %eax
+	imull	-1188(%rbp), %eax
+	movslq	%eax, %rdx
+	movl	-1136(%rbp), %eax
+	imull	-1188(%rbp), %eax
+	cltq
+	addq	%rdx, %rax
+	leaq	0(,%rax,4), %rdx
+	movq	-1072(%rbp), %rax
+	leaq	(%rdx,%rax), %rcx
+	movl	-1188(%rbp), %edx
+	leaq	-1060(%rbp), %rax
+	movq	%rax, %rsi
+	movq	%rcx, %rdi
+	call	stbi__hdr_convert
+	addl	$1, -1136(%rbp)
+.L2261:
+	movl	-1136(%rbp), %eax
+	cmpl	-1116(%rbp), %eax
+	jl	.L2290
+	addl	$1, -1132(%rbp)
+.L2260:
+	movl	-1132(%rbp), %eax
+	cmpl	-1120(%rbp), %eax
+	jl	.L2263
+	jmp	.L2264
+.L2259:
+	movq	$0, -1088(%rbp)
+	movl	$0, -1132(%rbp)
+	jmp	.L2265
+.L2285:
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1112(%rbp)
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1108(%rbp)
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -1104(%rbp)
+	cmpl	$2, -1112(%rbp)
+	jne	.L2266
+	cmpl	$2, -1108(%rbp)
+	jne	.L2266
+	movl	-1104(%rbp), %eax
+	andl	$128, %eax
+	testl	%eax, %eax
+	je	.L2267
+.L2266:
+	movl	-1112(%rbp), %eax
+	movb	%al, -1060(%rbp)
+	movl	-1108(%rbp), %eax
+	movb	%al, -1059(%rbp)
+	movl	-1104(%rbp), %eax
+	movb	%al, -1058(%rbp)
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1057(%rbp)
+	movl	-1188(%rbp), %edx
+	leaq	-1060(%rbp), %rcx
+	movq	-1072(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_convert
+	movl	$1, -1136(%rbp)
+	movl	$0, -1132(%rbp)
+	movq	-1088(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	jmp	.L2262
+.L2267:
+	sall	$8, -1104(%rbp)
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	orl	%eax, -1104(%rbp)
+	movl	-1104(%rbp), %eax
+	cmpl	-1116(%rbp), %eax
+	je	.L2268
+	movq	-1072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-1088(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L2286
+.L2268:
+	cmpq	$0, -1088(%rbp)
+	jne	.L2269
+	movl	-1116(%rbp), %eax
+	movl	$0, %edx
+	movl	$4, %esi
+	movl	%eax, %edi
+	call	stbi__malloc_mad2
+	movq	%rax, -1088(%rbp)
+	cmpq	$0, -1088(%rbp)
+	jne	.L2269
+	movq	-1072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L2286
+.L2269:
+	movl	$0, -1128(%rbp)
+	jmp	.L2270
+.L2282:
+	movl	$0, -1136(%rbp)
+	jmp	.L2271
+.L2281:
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1142(%rbp)
+	cmpb	$-128, -1142(%rbp)
+	jbe	.L2272
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1141(%rbp)
+	addb	$-128, -1142(%rbp)
+	cmpb	$0, -1142(%rbp)
+	je	.L2273
+	movzbl	-1142(%rbp), %eax
+	cmpl	%eax, -1100(%rbp)
+	jge	.L2274
+.L2273:
+	movq	-1072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-1088(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L2286
+.L2274:
+	movl	$0, -1124(%rbp)
+	jmp	.L2275
+.L2276:
+	movl	-1136(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1136(%rbp)
+	leal	0(,%rax,4), %edx
+	movl	-1128(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-1088(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	-1141(%rbp), %eax
+	movb	%al, (%rdx)
+	addl	$1, -1124(%rbp)
+.L2275:
+	movzbl	-1142(%rbp), %eax
+	cmpl	%eax, -1124(%rbp)
+	jl	.L2276
+	jmp	.L2271
+.L2272:
+	cmpb	$0, -1142(%rbp)
+	je	.L2277
+	movzbl	-1142(%rbp), %eax
+	cmpl	%eax, -1100(%rbp)
+	jge	.L2278
+.L2277:
+	movq	-1072(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	-1088(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L2286
+.L2278:
+	movl	$0, -1124(%rbp)
+	jmp	.L2279
+.L2280:
+	movl	-1136(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -1136(%rbp)
+	leal	0(,%rax,4), %edx
+	movl	-1128(%rbp), %eax
+	addl	%edx, %eax
+	movslq	%eax, %rdx
+	movq	-1088(%rbp), %rax
+	leaq	(%rdx,%rax), %rbx
+	movq	-1160(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, (%rbx)
+	addl	$1, -1124(%rbp)
+.L2279:
+	movzbl	-1142(%rbp), %eax
+	cmpl	%eax, -1124(%rbp)
+	jl	.L2280
+.L2271:
+	movl	-1116(%rbp), %eax
+	subl	-1136(%rbp), %eax
+	movl	%eax, -1100(%rbp)
+	cmpl	$0, -1100(%rbp)
+	jg	.L2281
+	addl	$1, -1128(%rbp)
+.L2270:
+	cmpl	$3, -1128(%rbp)
+	jle	.L2282
+	movl	$0, -1136(%rbp)
+	jmp	.L2283
+.L2284:
+	movl	-1136(%rbp), %eax
+	sall	$2, %eax
+	movslq	%eax, %rdx
+	movq	-1088(%rbp), %rax
+	leaq	(%rdx,%rax), %rsi
+	movl	-1132(%rbp), %eax
+	imull	-1116(%rbp), %eax
+	movl	%eax, %edx
+	movl	-1136(%rbp), %eax
+	addl	%edx, %eax
+	imull	-1188(%rbp), %eax
+	cltq
+	leaq	0(,%rax,4), %rdx
+	movq	-1072(%rbp), %rax
+	leaq	(%rdx,%rax), %rcx
+	movl	-1188(%rbp), %eax
+	movl	%eax, %edx
+	movq	%rcx, %rdi
+	call	stbi__hdr_convert
+	addl	$1, -1136(%rbp)
+.L2283:
+	movl	-1136(%rbp), %eax
+	cmpl	-1116(%rbp), %eax
+	jl	.L2284
+	addl	$1, -1132(%rbp)
+.L2265:
+	movl	-1132(%rbp), %eax
+	cmpl	-1120(%rbp), %eax
+	jl	.L2285
+	cmpq	$0, -1088(%rbp)
+	je	.L2264
+	movq	-1088(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+.L2264:
+	movq	-1072(%rbp), %rax
+.L2286:
+	movq	-24(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2287
+	call	__stack_chk_fail@PLT
+.L2287:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5072:
+	.size	stbi__hdr_load, .-stbi__hdr_load
+	.type	stbi__hdr_info, @function
+stbi__hdr_info:
+.LFB5073:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$1088, %rsp
+	movq	%rdi, -1064(%rbp)
+	movq	%rsi, -1072(%rbp)
+	movq	%rdx, -1080(%rbp)
+	movq	%rcx, -1088(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	$0, -1052(%rbp)
+	cmpq	$0, -1072(%rbp)
+	jne	.L2292
+	leaq	-1056(%rbp), %rax
+	movq	%rax, -1072(%rbp)
+.L2292:
+	cmpq	$0, -1080(%rbp)
+	jne	.L2293
+	leaq	-1056(%rbp), %rax
+	movq	%rax, -1080(%rbp)
+.L2293:
+	cmpq	$0, -1088(%rbp)
+	jne	.L2294
+	leaq	-1056(%rbp), %rax
+	movq	%rax, -1088(%rbp)
+.L2294:
+	movq	-1064(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_test
+	testl	%eax, %eax
+	jne	.L2295
+	movq	-1064(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2305
+.L2295:
+	leaq	-1040(%rbp), %rdx
+	movq	-1064(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_gettoken
+	movq	%rax, -1048(%rbp)
+	movq	-1048(%rbp), %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	je	.L2308
+	movq	-1048(%rbp), %rax
+	leaq	.LC94(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	strcmp@PLT
+	testl	%eax, %eax
+	jne	.L2295
+	movl	$1, -1052(%rbp)
+	jmp	.L2295
+.L2308:
+	nop
+	cmpl	$0, -1052(%rbp)
+	jne	.L2300
+	movq	-1064(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2305
+.L2300:
+	leaq	-1040(%rbp), %rdx
+	movq	-1064(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__hdr_gettoken
+	movq	%rax, -1048(%rbp)
+	movq	-1048(%rbp), %rax
+	movl	$3, %edx
+	leaq	.LC95(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	strncmp@PLT
+	testl	%eax, %eax
+	je	.L2301
+	movq	-1064(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2305
+.L2301:
+	movq	-1048(%rbp), %rax
+	addq	$3, %rax
+	movq	%rax, -1048(%rbp)
+	movq	-1048(%rbp), %rax
+	leaq	-1048(%rbp), %rcx
+	movl	$10, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	strtol@PLT
+	movl	%eax, %edx
+	movq	-1080(%rbp), %rax
+	movl	%edx, (%rax)
+	jmp	.L2302
+.L2303:
+	movq	-1048(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, -1048(%rbp)
+.L2302:
+	movq	-1048(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$32, %al
+	je	.L2303
+	movq	-1048(%rbp), %rax
+	movl	$3, %edx
+	leaq	.LC96(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	strncmp@PLT
+	testl	%eax, %eax
+	je	.L2304
+	movq	-1064(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2305
+.L2304:
+	movq	-1048(%rbp), %rax
+	addq	$3, %rax
+	movq	%rax, -1048(%rbp)
+	movq	-1048(%rbp), %rax
+	movl	$10, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	strtol@PLT
+	movl	%eax, %edx
+	movq	-1072(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-1088(%rbp), %rax
+	movl	$3, (%rax)
+	movl	$1, %eax
+.L2305:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2306
+	call	__stack_chk_fail@PLT
+.L2306:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5073:
+	.size	stbi__hdr_info, .-stbi__hdr_info
+	.type	stbi__bmp_info, @function
+stbi__bmp_info:
+.LFB5074:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -72(%rbp)
+	movq	%rsi, -80(%rbp)
+	movq	%rdx, -88(%rbp)
+	movq	%rcx, -96(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	$255, -20(%rbp)
+	leaq	-48(%rbp), %rdx
+	movq	-72(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__bmp_parse_header
+	movq	%rax, -56(%rbp)
+	cmpq	$0, -56(%rbp)
+	jne	.L2310
+	movq	-72(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2318
+.L2310:
+	cmpq	$0, -80(%rbp)
+	je	.L2312
+	movq	-72(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-80(%rbp), %rax
+	movl	%edx, (%rax)
+.L2312:
+	cmpq	$0, -88(%rbp)
+	je	.L2313
+	movq	-72(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-88(%rbp), %rax
+	movl	%edx, (%rax)
+.L2313:
+	cmpq	$0, -96(%rbp)
+	je	.L2314
+	movl	-48(%rbp), %eax
+	cmpl	$24, %eax
+	jne	.L2315
+	movl	-24(%rbp), %eax
+	cmpl	$-16777216, %eax
+	jne	.L2315
+	movq	-96(%rbp), %rax
+	movl	$3, (%rax)
+	jmp	.L2314
+.L2315:
+	movl	-24(%rbp), %eax
+	testl	%eax, %eax
+	je	.L2316
+	movl	$4, %edx
+	jmp	.L2317
+.L2316:
+	movl	$3, %edx
+.L2317:
+	movq	-96(%rbp), %rax
+	movl	%edx, (%rax)
+.L2314:
+	movl	$1, %eax
+.L2318:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2319
+	call	__stack_chk_fail@PLT
+.L2319:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5074:
+	.size	stbi__bmp_info, .-stbi__bmp_info
+	.type	stbi__psd_info, @function
+stbi__psd_info:
+.LFB5075:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movq	%rdx, -56(%rbp)
+	movq	%rcx, -64(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	cmpq	$0, -48(%rbp)
+	jne	.L2321
+	leaq	-20(%rbp), %rax
+	movq	%rax, -48(%rbp)
+.L2321:
+	cmpq	$0, -56(%rbp)
+	jne	.L2322
+	leaq	-20(%rbp), %rax
+	movq	%rax, -56(%rbp)
+.L2322:
+	cmpq	$0, -64(%rbp)
+	jne	.L2323
+	leaq	-20(%rbp), %rax
+	movq	%rax, -64(%rbp)
+.L2323:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	cmpl	$943870035, %eax
+	je	.L2324
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2331
+.L2324:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	cmpl	$1, %eax
+	je	.L2326
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2331
+.L2326:
+	movq	-40(%rbp), %rax
+	movl	$6, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -16(%rbp)
+	cmpl	$0, -16(%rbp)
+	js	.L2327
+	cmpl	$16, -16(%rbp)
+	jle	.L2328
+.L2327:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2331
+.L2328:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, %edx
+	movq	-56(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	movl	%eax, %edx
+	movq	-48(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -12(%rbp)
+	cmpl	$8, -12(%rbp)
+	je	.L2329
+	cmpl	$16, -12(%rbp)
+	je	.L2329
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2331
+.L2329:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	cmpl	$3, %eax
+	je	.L2330
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2331
+.L2330:
+	movq	-64(%rbp), %rax
+	movl	$4, (%rax)
+	movl	$1, %eax
+.L2331:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2332
+	call	__stack_chk_fail@PLT
+.L2332:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5075:
+	.size	stbi__psd_info, .-stbi__psd_info
+	.type	stbi__psd_is16, @function
+stbi__psd_is16:
+.LFB5076:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get32be
+	cmpl	$943870035, %eax
+	je	.L2334
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2335
+.L2334:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	cmpl	$1, %eax
+	je	.L2336
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2335
+.L2336:
+	movq	-24(%rbp), %rax
+	movl	$6, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -8(%rbp)
+	cmpl	$0, -8(%rbp)
+	js	.L2337
+	cmpl	$16, -8(%rbp)
+	jle	.L2338
+.L2337:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2335
+.L2338:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movl	%eax, -4(%rbp)
+	cmpl	$16, -4(%rbp)
+	je	.L2339
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2335
+.L2339:
+	movl	$1, %eax
+.L2335:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5076:
+	.size	stbi__psd_is16, .-stbi__psd_is16
+	.type	stbi__pic_info, @function
+stbi__pic_info:
+.LFB5077:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$112, %rsp
+	movq	%rdi, -88(%rbp)
+	movq	%rsi, -96(%rbp)
+	movq	%rdx, -104(%rbp)
+	movq	%rcx, -112(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	$0, -68(%rbp)
+	movl	$0, -64(%rbp)
+	cmpq	$0, -96(%rbp)
+	jne	.L2341
+	leaq	-72(%rbp), %rax
+	movq	%rax, -96(%rbp)
+.L2341:
+	cmpq	$0, -104(%rbp)
+	jne	.L2342
+	leaq	-72(%rbp), %rax
+	movq	%rax, -104(%rbp)
+.L2342:
+	cmpq	$0, -112(%rbp)
+	jne	.L2343
+	leaq	-72(%rbp), %rax
+	movq	%rax, -112(%rbp)
+.L2343:
+	movq	-88(%rbp), %rax
+	leaq	.LC84(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pic_is4
+	testl	%eax, %eax
+	jne	.L2344
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2354
+.L2344:
+	movq	-88(%rbp), %rax
+	movl	$88, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movq	-96(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get16be
+	movq	-104(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2346
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2354
+.L2346:
+	movq	-96(%rbp), %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	je	.L2347
+	movq	-96(%rbp), %rax
+	movl	(%rax), %ecx
+	movl	$268435456, %eax
+	cltd
+	idivl	%ecx
+	movl	%eax, %edx
+	movq	-104(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	%eax, %edx
+	jge	.L2347
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2354
+.L2347:
+	movq	-88(%rbp), %rax
+	movl	$8, %esi
+	movq	%rax, %rdi
+	call	stbi__skip
+.L2351:
+	cmpl	$10, -64(%rbp)
+	jne	.L2348
+	movl	$0, %eax
+	jmp	.L2354
+.L2348:
+	movl	-64(%rbp), %eax
+	leal	1(%rax), %edx
+	movl	%edx, -64(%rbp)
+	leaq	-48(%rbp), %rcx
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	addq	%rcx, %rax
+	movq	%rax, -56(%rbp)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movzbl	%al, %eax
+	movl	%eax, -60(%rbp)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-56(%rbp), %rdx
+	movb	%al, (%rdx)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-56(%rbp), %rdx
+	movb	%al, 1(%rdx)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movq	-56(%rbp), %rdx
+	movb	%al, 2(%rdx)
+	movq	-56(%rbp), %rax
+	movzbl	2(%rax), %eax
+	movzbl	%al, %eax
+	orl	%eax, -68(%rbp)
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	je	.L2349
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2354
+.L2349:
+	movq	-56(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$8, %al
+	je	.L2350
+	movq	-88(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2354
+.L2350:
+	cmpl	$0, -60(%rbp)
+	jne	.L2351
+	movl	-68(%rbp), %eax
+	andl	$16, %eax
+	testl	%eax, %eax
+	je	.L2352
+	movl	$4, %edx
+	jmp	.L2353
+.L2352:
+	movl	$3, %edx
+.L2353:
+	movq	-112(%rbp), %rax
+	movl	%edx, (%rax)
+	movl	$1, %eax
+.L2354:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2355
+	call	__stack_chk_fail@PLT
+.L2355:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5077:
+	.size	stbi__pic_info, .-stbi__pic_info
+	.type	stbi__pnm_test, @function
+stbi__pnm_test:
+.LFB5078:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -2(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -1(%rbp)
+	cmpb	$80, -2(%rbp)
+	jne	.L2357
+	cmpb	$53, -1(%rbp)
+	je	.L2358
+	cmpb	$54, -1(%rbp)
+	je	.L2358
+.L2357:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2359
+.L2358:
+	movl	$1, %eax
+.L2359:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5078:
+	.size	stbi__pnm_test, .-stbi__pnm_test
+	.type	stbi__pnm_load, @function
+stbi__pnm_load:
+.LFB5079:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movl	%r8d, -52(%rbp)
+	movq	%r9, -64(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	8(%rax), %rcx
+	movq	-24(%rbp), %rax
+	leaq	4(%rax), %rdx
+	movq	-24(%rbp), %rsi
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pnm_info
+	movq	-64(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	jne	.L2361
+	movl	$0, %eax
+	jmp	.L2362
+.L2361:
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L2363
+	movl	$0, %eax
+	jmp	.L2362
+.L2363:
+	movq	-24(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16777216, %eax
+	jbe	.L2364
+	movl	$0, %eax
+	jmp	.L2362
+.L2364:
+	movq	-24(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-40(%rbp), %rax
+	movl	%edx, (%rax)
+	cmpq	$0, -48(%rbp)
+	je	.L2365
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %edx
+	movq	-48(%rbp), %rax
+	movl	%edx, (%rax)
+.L2365:
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	leal	7(%rax), %edx
+	testl	%eax, %eax
+	cmovs	%edx, %eax
+	sarl	$3, %eax
+	movl	%eax, %ecx
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %esi
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	$0, %r8d
+	movl	%eax, %edi
+	call	stbi__mad4sizes_valid
+	testl	%eax, %eax
+	jne	.L2366
+	movl	$0, %eax
+	jmp	.L2362
+.L2366:
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	leal	7(%rax), %edx
+	testl	%eax, %eax
+	cmovs	%edx, %eax
+	sarl	$3, %eax
+	movl	%eax, %ecx
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	(%rax), %eax
+	movl	%eax, %esi
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	$0, %r8d
+	movl	%eax, %edi
+	call	stbi__malloc_mad4
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L2367
+	movl	$0, %eax
+	jmp	.L2362
+.L2367:
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %eax
+	movl	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	(%rax), %eax
+	imull	%eax, %edx
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %eax
+	imull	%eax, %edx
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	leal	7(%rax), %ecx
+	testl	%eax, %eax
+	cmovs	%ecx, %eax
+	sarl	$3, %eax
+	imull	%edx, %eax
+	movl	%eax, %edx
+	movq	-8(%rbp), %rcx
+	movq	-24(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__getn
+	testl	%eax, %eax
+	jne	.L2368
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movl	$0, %eax
+	jmp	.L2362
+.L2368:
+	cmpl	$0, -52(%rbp)
+	je	.L2369
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %eax
+	cmpl	%eax, -52(%rbp)
+	je	.L2369
+	movq	-64(%rbp), %rax
+	movl	(%rax), %eax
+	cmpl	$16, %eax
+	jne	.L2370
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %edi
+	movq	-24(%rbp), %rax
+	movl	(%rax), %ecx
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %esi
+	movl	-52(%rbp), %edx
+	movq	-8(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__convert_format16
+	movq	%rax, -8(%rbp)
+	jmp	.L2371
+.L2370:
+	movq	-24(%rbp), %rax
+	movl	4(%rax), %edi
+	movq	-24(%rbp), %rax
+	movl	(%rax), %ecx
+	movq	-24(%rbp), %rax
+	movl	8(%rax), %esi
+	movl	-52(%rbp), %edx
+	movq	-8(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	stbi__convert_format
+	movq	%rax, -8(%rbp)
+.L2371:
+	cmpq	$0, -8(%rbp)
+	jne	.L2369
+	movq	-8(%rbp), %rax
+	jmp	.L2362
+.L2369:
+	movq	-8(%rbp), %rax
+.L2362:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5079:
+	.size	stbi__pnm_load, .-stbi__pnm_load
+	.type	stbi__pnm_isspace, @function
+stbi__pnm_isspace:
+.LFB5080:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, %eax
+	movb	%al, -4(%rbp)
+	cmpb	$32, -4(%rbp)
+	je	.L2373
+	cmpb	$9, -4(%rbp)
+	je	.L2373
+	cmpb	$10, -4(%rbp)
+	je	.L2373
+	cmpb	$11, -4(%rbp)
+	je	.L2373
+	cmpb	$12, -4(%rbp)
+	je	.L2373
+	cmpb	$13, -4(%rbp)
+	jne	.L2374
+.L2373:
+	movl	$1, %eax
+	jmp	.L2376
+.L2374:
+	movl	$0, %eax
+.L2376:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5080:
+	.size	stbi__pnm_isspace, .-stbi__pnm_isspace
+	.type	stbi__pnm_skip_whitespace, @function
+stbi__pnm_skip_whitespace:
+.LFB5081:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	jmp	.L2378
+.L2380:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+.L2378:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	jne	.L2379
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	movsbl	%al, %eax
+	movl	%eax, %edi
+	call	stbi__pnm_isspace
+	testl	%eax, %eax
+	jne	.L2380
+.L2379:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	jne	.L2386
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$35, %al
+	jne	.L2386
+	jmp	.L2382
+.L2384:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%eax, %edx
+	movq	-16(%rbp), %rax
+	movb	%dl, (%rax)
+.L2382:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	jne	.L2378
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$10, %al
+	je	.L2378
+	movq	-16(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$13, %al
+	jne	.L2384
+	jmp	.L2378
+.L2386:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5081:
+	.size	stbi__pnm_skip_whitespace, .-stbi__pnm_skip_whitespace
+	.type	stbi__pnm_isdigit, @function
+stbi__pnm_isdigit:
+.LFB5082:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movl	%edi, %eax
+	movb	%al, -4(%rbp)
+	cmpb	$47, -4(%rbp)
+	jle	.L2388
+	cmpb	$57, -4(%rbp)
+	jg	.L2388
+	movl	$1, %eax
+	jmp	.L2390
+.L2388:
+	movl	$0, %eax
+.L2390:
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5082:
+	.size	stbi__pnm_isdigit, .-stbi__pnm_isdigit
+	.section	.rodata
+.LC97:
+	.string	"integer parse overflow"
+	.text
+	.type	stbi__pnm_getinteger, @function
+stbi__pnm_getinteger:
+.LFB5083:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movl	$0, -4(%rbp)
+	jmp	.L2392
+.L2396:
+	movl	-4(%rbp), %edx
+	movl	%edx, %eax
+	sall	$2, %eax
+	addl	%edx, %eax
+	addl	%eax, %eax
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movsbl	%al, %eax
+	subl	$48, %eax
+	addl	%edx, %eax
+	movl	%eax, -4(%rbp)
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movl	%eax, %edx
+	movq	-32(%rbp), %rax
+	movb	%dl, (%rax)
+	cmpl	$214748364, -4(%rbp)
+	jg	.L2393
+	cmpl	$214748364, -4(%rbp)
+	jne	.L2392
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	cmpb	$55, %al
+	jle	.L2392
+.L2393:
+	leaq	.LC97(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2394
+.L2392:
+	movq	-24(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__at_eof
+	testl	%eax, %eax
+	jne	.L2395
+	movq	-32(%rbp), %rax
+	movzbl	(%rax), %eax
+	movsbl	%al, %eax
+	movl	%eax, %edi
+	call	stbi__pnm_isdigit
+	testl	%eax, %eax
+	jne	.L2396
+.L2395:
+	movl	-4(%rbp), %eax
+.L2394:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5083:
+	.size	stbi__pnm_getinteger, .-stbi__pnm_getinteger
+	.section	.rodata
+.LC98:
+	.string	"invalid width"
+.LC99:
+	.string	"max value > 65535"
+	.text
+	.type	stbi__pnm_info, @function
+stbi__pnm_info:
+.LFB5084:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%rsi, -48(%rbp)
+	movq	%rdx, -56(%rbp)
+	movq	%rcx, -64(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	cmpq	$0, -48(%rbp)
+	jne	.L2398
+	leaq	-16(%rbp), %rax
+	movq	%rax, -48(%rbp)
+.L2398:
+	cmpq	$0, -56(%rbp)
+	jne	.L2399
+	leaq	-16(%rbp), %rax
+	movq	%rax, -56(%rbp)
+.L2399:
+	cmpq	$0, -64(%rbp)
+	jne	.L2400
+	leaq	-16(%rbp), %rax
+	movq	%rax, -64(%rbp)
+.L2400:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -18(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -17(%rbp)
+	cmpb	$80, -18(%rbp)
+	jne	.L2401
+	cmpb	$53, -17(%rbp)
+	je	.L2402
+	cmpb	$54, -17(%rbp)
+	je	.L2402
+.L2401:
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__rewind
+	movl	$0, %eax
+	jmp	.L2410
+.L2402:
+	cmpb	$54, -17(%rbp)
+	jne	.L2404
+	movl	$3, %edx
+	jmp	.L2405
+.L2404:
+	movl	$1, %edx
+.L2405:
+	movq	-64(%rbp), %rax
+	movl	%edx, (%rax)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__get8
+	movb	%al, -19(%rbp)
+	leaq	-19(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pnm_skip_whitespace
+	leaq	-19(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pnm_getinteger
+	movq	-48(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-48(%rbp), %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	jne	.L2406
+	leaq	.LC98(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2410
+.L2406:
+	leaq	-19(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pnm_skip_whitespace
+	leaq	-19(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pnm_getinteger
+	movq	-56(%rbp), %rdx
+	movl	%eax, (%rdx)
+	movq	-56(%rbp), %rax
+	movl	(%rax), %eax
+	testl	%eax, %eax
+	jne	.L2407
+	leaq	.LC98(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2410
+.L2407:
+	leaq	-19(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pnm_skip_whitespace
+	leaq	-19(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__pnm_getinteger
+	movl	%eax, -12(%rbp)
+	cmpl	$65535, -12(%rbp)
+	jle	.L2408
+	leaq	.LC99(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2410
+.L2408:
+	cmpl	$255, -12(%rbp)
+	jle	.L2409
+	movl	$16, %eax
+	jmp	.L2410
+.L2409:
+	movl	$8, %eax
+.L2410:
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2411
+	call	__stack_chk_fail@PLT
+.L2411:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5084:
+	.size	stbi__pnm_info, .-stbi__pnm_info
+	.type	stbi__pnm_is16, @function
+stbi__pnm_is16:
+.LFB5085:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movl	$0, %ecx
+	movl	$0, %edx
+	movl	$0, %esi
+	movq	%rax, %rdi
+	call	stbi__pnm_info
+	cmpl	$16, %eax
+	jne	.L2413
+	movl	$1, %eax
+	jmp	.L2414
+.L2413:
+	movl	$0, %eax
+.L2414:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5085:
+	.size	stbi__pnm_is16, .-stbi__pnm_is16
+	.section	.rodata
+.LC100:
+	.string	"unknown image type"
+	.text
+	.type	stbi__info_main, @function
+stbi__info_main:
+.LFB5086:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	%rcx, -32(%rbp)
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__jpeg_info
+	testl	%eax, %eax
+	je	.L2416
+	movl	$1, %eax
+	jmp	.L2417
+.L2416:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__png_info
+	testl	%eax, %eax
+	je	.L2418
+	movl	$1, %eax
+	jmp	.L2417
+.L2418:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__gif_info
+	testl	%eax, %eax
+	je	.L2419
+	movl	$1, %eax
+	jmp	.L2417
+.L2419:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__bmp_info
+	testl	%eax, %eax
+	je	.L2420
+	movl	$1, %eax
+	jmp	.L2417
+.L2420:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__psd_info
+	testl	%eax, %eax
+	je	.L2421
+	movl	$1, %eax
+	jmp	.L2417
+.L2421:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pic_info
+	testl	%eax, %eax
+	je	.L2422
+	movl	$1, %eax
+	jmp	.L2417
+.L2422:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pnm_info
+	testl	%eax, %eax
+	je	.L2423
+	movl	$1, %eax
+	jmp	.L2417
+.L2423:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__hdr_info
+	testl	%eax, %eax
+	je	.L2424
+	movl	$1, %eax
+	jmp	.L2417
+.L2424:
+	movq	-32(%rbp), %rcx
+	movq	-24(%rbp), %rdx
+	movq	-16(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__tga_info
+	testl	%eax, %eax
+	je	.L2425
+	movl	$1, %eax
+	jmp	.L2417
+.L2425:
+	leaq	.LC100(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+.L2417:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5086:
+	.size	stbi__info_main, .-stbi__info_main
+	.type	stbi__is_16_main, @function
+stbi__is_16_main:
+.LFB5087:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__png_is16
+	testl	%eax, %eax
+	je	.L2427
+	movl	$1, %eax
+	jmp	.L2428
+.L2427:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__psd_is16
+	testl	%eax, %eax
+	je	.L2429
+	movl	$1, %eax
+	jmp	.L2428
+.L2429:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__pnm_is16
+	testl	%eax, %eax
+	je	.L2430
+	movl	$1, %eax
+	jmp	.L2428
+.L2430:
+	movl	$0, %eax
+.L2428:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5087:
+	.size	stbi__is_16_main, .-stbi__is_16_main
+	.section	.rodata
+.LC101:
+	.string	"can't fopen"
+	.text
+	.globl	stbi_info
+	.type	stbi_info, @function
+stbi_info:
+.LFB5088:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	%rsi, -32(%rbp)
+	movq	%rdx, -40(%rbp)
+	movq	%rcx, -48(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC2(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__fopen
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L2432
+	leaq	.LC101(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2433
+.L2432:
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi_info_from_file
+	movl	%eax, -12(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	fclose@PLT
+	movl	-12(%rbp), %eax
+.L2433:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5088:
+	.size	stbi_info, .-stbi_info
+	.globl	stbi_info_from_file
+	.type	stbi_info_from_file, @function
+stbi_info_from_file:
+.LFB5089:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -264(%rbp)
+	movq	%rsi, -272(%rbp)
+	movq	%rdx, -280(%rbp)
+	movq	%rcx, -288(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-264(%rbp), %rax
+	movq	%rax, %rdi
+	call	ftell@PLT
+	movq	%rax, -248(%rbp)
+	movq	-264(%rbp), %rdx
+	leaq	-240(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_file
+	movq	-288(%rbp), %rcx
+	movq	-280(%rbp), %rdx
+	movq	-272(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__info_main
+	movl	%eax, -252(%rbp)
+	movq	-248(%rbp), %rcx
+	movq	-264(%rbp), %rax
+	movl	$0, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	fseek@PLT
+	movl	-252(%rbp), %eax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2436
+	call	__stack_chk_fail@PLT
+.L2436:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5089:
+	.size	stbi_info_from_file, .-stbi_info_from_file
+	.globl	stbi_is_16_bit
+	.type	stbi_is_16_bit, @function
+stbi_is_16_bit:
+.LFB5090:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -24(%rbp)
+	movq	-24(%rbp), %rax
+	leaq	.LC2(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__fopen
+	movq	%rax, -8(%rbp)
+	cmpq	$0, -8(%rbp)
+	jne	.L2438
+	leaq	.LC101(%rip), %rax
+	movq	%rax, %rdi
+	call	stbi__err
+	jmp	.L2439
+.L2438:
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi_is_16_bit_from_file
+	movl	%eax, -12(%rbp)
+	movq	-8(%rbp), %rax
+	movq	%rax, %rdi
+	call	fclose@PLT
+	movl	-12(%rbp), %eax
+.L2439:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5090:
+	.size	stbi_is_16_bit, .-stbi_is_16_bit
+	.globl	stbi_is_16_bit_from_file
+	.type	stbi_is_16_bit_from_file, @function
+stbi_is_16_bit_from_file:
+.LFB5091:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$272, %rsp
+	movq	%rdi, -264(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-264(%rbp), %rax
+	movq	%rax, %rdi
+	call	ftell@PLT
+	movq	%rax, -248(%rbp)
+	movq	-264(%rbp), %rdx
+	leaq	-240(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_file
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__is_16_main
+	movl	%eax, -252(%rbp)
+	movq	-248(%rbp), %rcx
+	movq	-264(%rbp), %rax
+	movl	$0, %edx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	fseek@PLT
+	movl	-252(%rbp), %eax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2442
+	call	__stack_chk_fail@PLT
+.L2442:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5091:
+	.size	stbi_is_16_bit_from_file, .-stbi_is_16_bit_from_file
+	.globl	stbi_info_from_memory
+	.type	stbi_info_from_memory, @function
+stbi_info_from_memory:
+.LFB5092:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movl	%esi, -252(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-252(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__info_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2445
+	call	__stack_chk_fail@PLT
+.L2445:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5092:
+	.size	stbi_info_from_memory, .-stbi_info_from_memory
+	.globl	stbi_info_from_callbacks
+	.type	stbi_info_from_callbacks, @function
+stbi_info_from_callbacks:
+.LFB5093:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$288, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	%rcx, -272(%rbp)
+	movq	%r8, -280(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-256(%rbp), %rdx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	movq	-280(%rbp), %rcx
+	movq	-272(%rbp), %rdx
+	movq	-264(%rbp), %rsi
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__info_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2448
+	call	__stack_chk_fail@PLT
+.L2448:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5093:
+	.size	stbi_info_from_callbacks, .-stbi_info_from_callbacks
+	.globl	stbi_is_16_bit_from_memory
+	.type	stbi_is_16_bit_from_memory, @function
+stbi_is_16_bit_from_memory:
+.LFB5094:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$256, %rsp
+	movq	%rdi, -248(%rbp)
+	movl	%esi, -252(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movl	-252(%rbp), %edx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_mem
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__is_16_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2451
+	call	__stack_chk_fail@PLT
+.L2451:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5094:
+	.size	stbi_is_16_bit_from_memory, .-stbi_is_16_bit_from_memory
+	.globl	stbi_is_16_bit_from_callbacks
+	.type	stbi_is_16_bit_from_callbacks, @function
+stbi_is_16_bit_from_callbacks:
+.LFB5095:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$256, %rsp
+	movq	%rdi, -248(%rbp)
+	movq	%rsi, -256(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	-256(%rbp), %rdx
+	movq	-248(%rbp), %rcx
+	leaq	-240(%rbp), %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	stbi__start_callbacks
+	leaq	-240(%rbp), %rax
+	movq	%rax, %rdi
+	call	stbi__is_16_main
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2454
+	call	__stack_chk_fail@PLT
+.L2454:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5095:
+	.size	stbi_is_16_bit_from_callbacks, .-stbi_is_16_bit_from_callbacks
+	.section	.rodata
+.LC102:
+	.string	"ERROR loading file %s: %s"
+.LC103:
+	.string	"SDL Error: %s\n"
+	.text
+	.globl	surface_from_file
+	.type	surface_from_file, @function
+surface_from_file:
+.LFB5096:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$80, %rsp
+	movq	%rdi, -72(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	leaq	-52(%rbp), %rcx
+	leaq	-56(%rbp), %rdx
+	leaq	-60(%rbp), %rsi
+	movq	-72(%rbp), %rax
+	movl	$4, %r8d
+	movq	%rax, %rdi
+	call	stbi_load
+	movq	%rax, -24(%rbp)
+	cmpq	$0, -24(%rbp)
+	jne	.L2456
+	call	stbi_failure_reason
+	movq	%rax, %rcx
+	movq	stderr(%rip), %rax
+	movq	-72(%rbp), %rdx
+	leaq	.LC102(%rip), %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2456:
+	movl	$-16777216, -48(%rbp)
+	movl	$16711680, -44(%rbp)
+	movl	$65280, -40(%rbp)
+	movl	$255, -36(%rbp)
+	movl	$32, -32(%rbp)
+	movl	-60(%rbp), %eax
+	sall	$2, %eax
+	movl	%eax, -28(%rbp)
+	movl	-56(%rbp), %edx
+	movl	-60(%rbp), %esi
+	movl	-48(%rbp), %r9d
+	movl	-28(%rbp), %r8d
+	movl	-32(%rbp), %ecx
+	movq	-24(%rbp), %rax
+	subq	$8, %rsp
+	movl	-36(%rbp), %edi
+	pushq	%rdi
+	movl	-40(%rbp), %edi
+	pushq	%rdi
+	movl	-44(%rbp), %edi
+	pushq	%rdi
+	movq	%rax, %rdi
+	call	SDL_CreateRGBSurfaceFrom@PLT
+	addq	$32, %rsp
+	movq	%rax, -16(%rbp)
+	cmpq	$0, -16(%rbp)
+	jne	.L2457
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2457:
+	movq	-16(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2459
+	call	__stack_chk_fail@PLT
+.L2459:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5096:
+	.size	surface_from_file, .-surface_from_file
+	.globl	font_create
+	.type	font_create, @function
+font_create:
+.LFB5097:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$136, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -120(%rbp)
+	movq	%rsi, -128(%rbp)
+	movq	%rdx, -136(%rbp)
+	movl	%ecx, -140(%rbp)
+	movl	%r8d, -144(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -80(%rbp)
+	movaps	%xmm0, -64(%rbp)
+	movaps	%xmm0, -48(%rbp)
+	movq	%xmm0, -32(%rbp)
+	movl	-140(%rbp), %eax
+	movl	%eax, -40(%rbp)
+	movl	-144(%rbp), %eax
+	movl	%eax, -36(%rbp)
+	movq	-136(%rbp), %rax
+	movq	%rax, %rdi
+	call	surface_from_file
+	movq	%rax, -80(%rbp)
+	movq	-80(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2461
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2461:
+	movq	-80(%rbp), %rax
+	movl	$0, %edx
+	movl	$1, %esi
+	movq	%rax, %rdi
+	call	SDL_SetColorKey@PLT
+	movq	-80(%rbp), %rdx
+	movq	-128(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	SDL_CreateTextureFromSurface@PLT
+	movq	%rax, -72(%rbp)
+	movq	-72(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2462
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2462:
+	movq	-80(%rbp), %rax
+	movl	16(%rax), %eax
+	movl	%eax, -48(%rbp)
+	movq	-80(%rbp), %rax
+	movl	20(%rax), %eax
+	movl	%eax, -44(%rbp)
+	movl	-48(%rbp), %eax
+	movl	-36(%rbp), %ebx
+	movl	$0, %edx
+	divl	%ebx
+	movl	%eax, -32(%rbp)
+	movl	-44(%rbp), %eax
+	movl	-40(%rbp), %ebx
+	movl	$0, %edx
+	divl	%ebx
+	movl	%eax, -28(%rbp)
+	movl	$128, %esi
+	movl	$16, %edi
+	call	calloc@PLT
+	movq	%rax, -64(%rbp)
+	movq	$0, -104(%rbp)
+	jmp	.L2463
+.L2464:
+	movl	-36(%rbp), %eax
+	movl	%eax, %ecx
+	movq	-104(%rbp), %rax
+	movl	$0, %edx
+	divq	%rcx
+	movq	%rdx, -96(%rbp)
+	movl	-36(%rbp), %eax
+	movl	%eax, %ebx
+	movq	-104(%rbp), %rax
+	movl	$0, %edx
+	divq	%rbx
+	movq	%rax, -88(%rbp)
+	movq	-64(%rbp), %rax
+	movq	-104(%rbp), %rdx
+	salq	$4, %rdx
+	addq	%rdx, %rax
+	movq	-96(%rbp), %rdx
+	movl	%edx, %ecx
+	movl	-32(%rbp), %edx
+	imull	%ecx, %edx
+	movl	%edx, %edi
+	movq	-88(%rbp), %rdx
+	movl	%edx, %ecx
+	movl	-28(%rbp), %edx
+	imull	%ecx, %edx
+	movl	%edx, %esi
+	movl	-32(%rbp), %edx
+	movl	%edx, %ecx
+	movl	-28(%rbp), %edx
+	movl	%edi, (%rax)
+	movl	%esi, 4(%rax)
+	movl	%ecx, 8(%rax)
+	movl	%edx, 12(%rax)
+	addq	$1, -104(%rbp)
+.L2463:
+	cmpq	$127, -104(%rbp)
+	jbe	.L2464
+	movq	-120(%rbp), %rax
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	-64(%rbp), %rcx
+	movq	-56(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	-32(%rbp), %rdx
+	movq	%rdx, 48(%rax)
+	movq	-120(%rbp), %rax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5097:
+	.size	font_create, .-font_create
+	.globl	vec2
+	.type	vec2, @function
+vec2:
+.LFB5098:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movsd	%xmm0, -24(%rbp)
+	movsd	%xmm1, -32(%rbp)
+	movq	-24(%rbp), %rax
+	movq	-32(%rbp), %rdx
+	movq	%rax, -48(%rbp)
+	movq	%rdx, -40(%rbp)
+	movdqa	-48(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5098:
+	.size	vec2, .-vec2
+	.globl	vec2s
+	.type	vec2s, @function
+vec2s:
+.LFB5099:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movsd	%xmm0, -24(%rbp)
+	movq	-24(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	%rax, -48(%rbp)
+	movq	%rdx, -40(%rbp)
+	movdqa	-48(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5099:
+	.size	vec2s, .-vec2s
+	.globl	vec2_add
+	.type	vec2_add, @function
+vec2_add:
+.LFB5100:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm0, %rcx
+	movq	%xmm1, %rbx
+	movq	%rcx, -32(%rbp)
+	movq	%rbx, -24(%rbp)
+	movapd	%xmm2, %xmm1
+	movapd	%xmm3, %xmm0
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm1, %rcx
+	movq	%xmm0, %rbx
+	movq	%rcx, -48(%rbp)
+	movq	%rbx, -40(%rbp)
+	movsd	-32(%rbp), %xmm1
+	movsd	-48(%rbp), %xmm0
+	movapd	%xmm1, %xmm2
+	addsd	%xmm0, %xmm2
+	movsd	-24(%rbp), %xmm1
+	movsd	-40(%rbp), %xmm0
+	addsd	%xmm1, %xmm0
+	movq	%xmm2, %rax
+	movq	%xmm0, %rdx
+	movq	%rax, -64(%rbp)
+	movq	%rdx, -56(%rbp)
+	movdqa	-64(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5100:
+	.size	vec2_add, .-vec2_add
+	.globl	vec2_sub
+	.type	vec2_sub, @function
+vec2_sub:
+.LFB5101:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm0, %rcx
+	movq	%xmm1, %rbx
+	movq	%rcx, -32(%rbp)
+	movq	%rbx, -24(%rbp)
+	movapd	%xmm2, %xmm1
+	movapd	%xmm3, %xmm0
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm1, %rcx
+	movq	%xmm0, %rbx
+	movq	%rcx, -48(%rbp)
+	movq	%rbx, -40(%rbp)
+	movsd	-32(%rbp), %xmm0
+	movsd	-48(%rbp), %xmm1
+	movapd	%xmm0, %xmm2
+	subsd	%xmm1, %xmm2
+	movsd	-24(%rbp), %xmm0
+	movsd	-40(%rbp), %xmm1
+	subsd	%xmm1, %xmm0
+	movq	%xmm2, %rax
+	movq	%xmm0, %rdx
+	movq	%rax, -64(%rbp)
+	movq	%rdx, -56(%rbp)
+	movdqa	-64(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5101:
+	.size	vec2_sub, .-vec2_sub
+	.globl	vec2_mul
+	.type	vec2_mul, @function
+vec2_mul:
+.LFB5102:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm0, %rcx
+	movq	%xmm1, %rbx
+	movq	%rcx, -32(%rbp)
+	movq	%rbx, -24(%rbp)
+	movapd	%xmm2, %xmm1
+	movapd	%xmm3, %xmm0
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm1, %rcx
+	movq	%xmm0, %rbx
+	movq	%rcx, -48(%rbp)
+	movq	%rbx, -40(%rbp)
+	movsd	-32(%rbp), %xmm1
+	movsd	-48(%rbp), %xmm0
+	movapd	%xmm1, %xmm2
+	mulsd	%xmm0, %xmm2
+	movsd	-24(%rbp), %xmm1
+	movsd	-40(%rbp), %xmm0
+	mulsd	%xmm1, %xmm0
+	movq	%xmm2, %rax
+	movq	%xmm0, %rdx
+	movq	%rax, -64(%rbp)
+	movq	%rdx, -56(%rbp)
+	movdqa	-64(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5102:
+	.size	vec2_mul, .-vec2_mul
+	.globl	vec2_div
+	.type	vec2_div, @function
+vec2_div:
+.LFB5103:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm0, %rcx
+	movq	%xmm1, %rbx
+	movq	%rcx, -32(%rbp)
+	movq	%rbx, -24(%rbp)
+	movapd	%xmm2, %xmm1
+	movapd	%xmm3, %xmm0
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm1, %rcx
+	movq	%xmm0, %rbx
+	movq	%rcx, -48(%rbp)
+	movq	%rbx, -40(%rbp)
+	movsd	-32(%rbp), %xmm0
+	movsd	-48(%rbp), %xmm1
+	movapd	%xmm0, %xmm2
+	divsd	%xmm1, %xmm2
+	movsd	-24(%rbp), %xmm0
+	movsd	-40(%rbp), %xmm1
+	divsd	%xmm1, %xmm0
+	movq	%xmm2, %rax
+	movq	%xmm0, %rdx
+	movq	%rax, -64(%rbp)
+	movq	%rdx, -56(%rbp)
+	movdqa	-64(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5103:
+	.size	vec2_div, .-vec2_div
+	.globl	window_create
+	.type	window_create, @function
+window_create:
+.LFB5104:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	$0, -32(%rbp)
+	movq	$0, -24(%rbp)
+	movq	-40(%rbp), %rax
+	movl	$32, %r9d
+	movl	$800, %r8d
+	movl	$600, %ecx
+	movl	$536805376, %edx
+	movl	$536805376, %esi
+	movq	%rax, %rdi
+	call	SDL_CreateWindow@PLT
+	movq	%rax, -32(%rbp)
+	movq	-32(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2479
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2479:
+	movq	-32(%rbp), %rax
+	leaq	-32(%rbp), %rdx
+	addq	$8, %rdx
+	leaq	-32(%rbp), %rcx
+	addq	$12, %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	SDL_GetWindowSize@PLT
+	movq	-32(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	-8(%rbp), %rcx
+	subq	%fs:40, %rcx
+	je	.L2481
+	call	__stack_chk_fail@PLT
+.L2481:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5104:
+	.size	window_create, .-window_create
+	.globl	buffer_create
+	.type	buffer_create, @function
+buffer_create:
+.LFB5105:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$48, %rsp
+	movq	%rdi, -40(%rbp)
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -32(%rbp)
+	movaps	%xmm0, -16(%rbp)
+	movq	$16, -16(%rbp)
+	movq	$0, -24(%rbp)
+	movq	-16(%rbp), %rax
+	movl	$1, %esi
+	movq	%rax, %rdi
+	call	calloc@PLT
+	movq	%rax, -32(%rbp)
+	movq	-40(%rbp), %rcx
+	movq	-32(%rbp), %rax
+	movq	-24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	-16(%rbp), %rax
+	movq	-8(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	movq	-40(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5105:
+	.size	buffer_create, .-buffer_create
+	.globl	buffer_destroy
+	.type	buffer_destroy, @function
+buffer_destroy:
+.LFB5106:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	16(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	nop
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5106:
+	.size	buffer_destroy, .-buffer_destroy
+	.globl	buffer_mv_cur_letf
+	.type	buffer_mv_cur_letf, @function
+buffer_mv_cur_letf:
+.LFB5107:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	40(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2486
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2487
+.L2486:
+	movq	40(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 40(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+.L2487:
+	movq	-8(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5107:
+	.size	buffer_mv_cur_letf, .-buffer_mv_cur_letf
+	.globl	buffer_mv_cur_right
+	.type	buffer_mv_cur_right, @function
+buffer_mv_cur_right:
+.LFB5108:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	40(%rbp), %rdx
+	movq	24(%rbp), %rax
+	cmpq	%rax, %rdx
+	jne	.L2489
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2490
+.L2489:
+	movq	40(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, 40(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+.L2490:
+	movq	-8(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5108:
+	.size	buffer_mv_cur_right, .-buffer_mv_cur_right
+	.globl	buffer_mv_cur_up
+	.type	buffer_mv_cur_up, @function
+buffer_mv_cur_up:
+.LFB5109:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	movq	-8(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5109:
+	.size	buffer_mv_cur_up, .-buffer_mv_cur_up
+	.globl	buffer_mv_cur_down
+	.type	buffer_mv_cur_down, @function
+buffer_mv_cur_down:
+.LFB5110:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	movq	%rdi, -8(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	movq	-8(%rbp), %rax
+	popq	%rbp
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5110:
+	.size	buffer_mv_cur_down, .-buffer_mv_cur_down
+	.globl	buffer_insert_char
+	.type	buffer_insert_char, @function
+buffer_insert_char:
+.LFB5111:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movl	%edx, %eax
+	movb	%al, -20(%rbp)
+	movq	24(%rbp), %rax
+	cmpq	-16(%rbp), %rax
+	jnb	.L2496
+	movq	24(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	32(%rbp), %rax
+	cmpq	%rax, %rdx
+	jb	.L2497
+	movq	32(%rbp), %rax
+	addq	%rax, %rax
+	movq	%rax, 32(%rbp)
+	movq	32(%rbp), %rdx
+	movq	16(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	realloc@PLT
+	movq	%rax, 16(%rbp)
+.L2497:
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, 24(%rbp)
+	leaq	(%rcx,%rax), %rdx
+	movzbl	-20(%rbp), %eax
+	movb	%al, (%rdx)
+	movq	40(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, 40(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2498
+.L2496:
+	movq	24(%rbp), %rax
+	cmpq	-16(%rbp), %rax
+	jb	.L2500
+	movq	24(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	32(%rbp), %rax
+	cmpq	%rax, %rdx
+	jb	.L2501
+	movq	32(%rbp), %rax
+	addq	%rax, %rax
+	movq	%rax, 32(%rbp)
+	movq	32(%rbp), %rdx
+	movq	16(%rbp), %rax
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	realloc@PLT
+	movq	%rax, 16(%rbp)
+.L2501:
+	movq	32(%rbp), %rax
+	subq	-16(%rbp), %rax
+	movq	16(%rbp), %rcx
+	movq	-16(%rbp), %rdx
+	leaq	(%rcx,%rdx), %rsi
+	movq	16(%rbp), %rdx
+	movq	-16(%rbp), %rcx
+	addq	$1, %rcx
+	addq	%rdx, %rcx
+	movq	%rax, %rdx
+	movq	%rcx, %rdi
+	call	memmove@PLT
+	movq	16(%rbp), %rdx
+	movq	-16(%rbp), %rax
+	addq	%rax, %rdx
+	movzbl	-20(%rbp), %eax
+	movb	%al, (%rdx)
+	movq	24(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, 24(%rbp)
+.L2500:
+	movq	40(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, 40(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+.L2498:
+	movq	-8(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5111:
+	.size	buffer_insert_char, .-buffer_insert_char
+	.globl	buffer_remove_char_mv_cur
+	.type	buffer_remove_char_mv_cur, @function
+buffer_remove_char_mv_cur:
+.LFB5112:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	24(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2503
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2504
+.L2503:
+	movq	24(%rbp), %rax
+	cmpq	-16(%rbp), %rax
+	jnb	.L2505
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2504
+.L2505:
+	movq	24(%rbp), %rax
+	cmpq	%rax, -16(%rbp)
+	jne	.L2506
+	movq	16(%rbp), %rdx
+	movq	24(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 24(%rbp)
+	movq	24(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$0, (%rax)
+	movq	40(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 40(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2504
+.L2506:
+	movq	24(%rbp), %rax
+	cmpq	-16(%rbp), %rax
+	jb	.L2508
+	movq	32(%rbp), %rax
+	subq	-16(%rbp), %rax
+	leaq	-1(%rax), %rdx
+	movq	16(%rbp), %rax
+	movq	-16(%rbp), %rcx
+	addq	$1, %rcx
+	addq	%rax, %rcx
+	movq	16(%rbp), %rsi
+	movq	-16(%rbp), %rax
+	addq	%rsi, %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memmove@PLT
+	movq	24(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 24(%rbp)
+.L2508:
+	movq	40(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 40(%rbp)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+.L2504:
+	movq	-8(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5112:
+	.size	buffer_remove_char_mv_cur, .-buffer_remove_char_mv_cur
+	.globl	buffer_remove_char
+	.type	buffer_remove_char, @function
+buffer_remove_char:
+.LFB5113:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%rdi, -8(%rbp)
+	movq	%rsi, -16(%rbp)
+	movq	24(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2510
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2511
+.L2510:
+	movq	24(%rbp), %rax
+	cmpq	-16(%rbp), %rax
+	jnb	.L2512
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2511
+.L2512:
+	movq	24(%rbp), %rax
+	cmpq	%rax, -16(%rbp)
+	jne	.L2513
+	movq	16(%rbp), %rdx
+	movq	24(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 24(%rbp)
+	movq	24(%rbp), %rax
+	addq	%rdx, %rax
+	movb	$0, (%rax)
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2511
+.L2513:
+	movq	24(%rbp), %rax
+	cmpq	-16(%rbp), %rax
+	jb	.L2515
+	movq	32(%rbp), %rax
+	subq	-16(%rbp), %rax
+	leaq	-1(%rax), %rdx
+	movq	16(%rbp), %rax
+	movq	-16(%rbp), %rcx
+	addq	$1, %rcx
+	addq	%rax, %rcx
+	movq	16(%rbp), %rsi
+	movq	-16(%rbp), %rax
+	addq	%rsi, %rax
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	memmove@PLT
+	movq	24(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, 24(%rbp)
+.L2515:
+	movq	-8(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+.L2511:
+	movq	-8(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5113:
+	.size	buffer_remove_char, .-buffer_remove_char
+	.section	.rodata
+.LC104:
+	.string	"%ld\n"
+	.text
+	.globl	buffer_remove_between
+	.type	buffer_remove_between, @function
+buffer_remove_between:
+.LFB5114:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$80, %rsp
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movq	%rdx, -72(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movq	24(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2517
+	movq	-56(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2522
+.L2517:
+	movq	-64(%rbp), %rax
+	cmpq	-72(%rbp), %rax
+	jne	.L2519
+	movq	-56(%rbp), %rdi
+	movq	-64(%rbp), %rsi
+	subq	$32, %rsp
+	movq	%rsp, %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	call	buffer_remove_char
+	addq	$32, %rsp
+	jmp	.L2522
+.L2519:
+	movq	-64(%rbp), %rax
+	cmpq	%rax, -72(%rbp)
+	jnb	.L2520
+	movq	-56(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2522
+.L2520:
+	movq	16(%rbp), %rax
+	movq	%rax, -32(%rbp)
+	movq	24(%rbp), %rax
+	movq	%rax, -24(%rbp)
+	movq	32(%rbp), %rax
+	movq	%rax, -16(%rbp)
+	movq	-24(%rbp), %rax
+	cmpq	-72(%rbp), %rax
+	jnb	.L2521
+	movq	-56(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	jmp	.L2522
+.L2521:
+	movq	-72(%rbp), %rax
+	subq	-64(%rbp), %rax
+	movq	%rax, -40(%rbp)
+	movq	-40(%rbp), %rax
+	movq	%rax, %rsi
+	leaq	.LC104(%rip), %rax
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	printf@PLT
+	movq	-24(%rbp), %rax
+	subq	-40(%rbp), %rax
+	movq	-32(%rbp), %rcx
+	movq	-72(%rbp), %rdx
+	leaq	(%rcx,%rdx), %rsi
+	movq	-32(%rbp), %rcx
+	movq	-64(%rbp), %rdx
+	addq	%rdx, %rcx
+	movq	%rax, %rdx
+	movq	%rcx, %rdi
+	call	memmove@PLT
+	movq	-24(%rbp), %rax
+	subq	-40(%rbp), %rax
+	movq	%rax, -24(%rbp)
+	movq	40(%rbp), %rax
+	subq	-40(%rbp), %rax
+	movq	%rax, 40(%rbp)
+	movq	-56(%rbp), %rcx
+	movq	16(%rbp), %rax
+	movq	24(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	32(%rbp), %rax
+	movq	40(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+.L2522:
+	movq	-8(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L2523
+	call	__stack_chk_fail@PLT
+.L2523:
+	movq	-56(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5114:
+	.size	buffer_remove_between, .-buffer_remove_between
+	.globl	buffer_index_bw_word
+	.type	buffer_index_bw_word, @function
+buffer_index_bw_word:
+.LFB5115:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	24(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2525
+	movl	$0, %eax
+	jmp	.L2526
+.L2525:
+	movb	$0, -11(%rbp)
+	movq	40(%rbp), %rax
+	movq	%rax, -8(%rbp)
+	jmp	.L2527
+.L2530:
+	movq	16(%rbp), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -9(%rbp)
+	call	__ctype_b_loc@PLT
+	movq	(%rax), %rax
+	movsbq	-9(%rbp), %rdx
+	addq	%rdx, %rdx
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	andl	$1024, %eax
+	testl	%eax, %eax
+	je	.L2528
+	movb	$1, -11(%rbp)
+	jmp	.L2529
+.L2528:
+	cmpb	$0, -11(%rbp)
+	je	.L2529
+	movq	-8(%rbp), %rax
+	addq	$1, %rax
+	jmp	.L2526
+.L2529:
+	subq	$1, -8(%rbp)
+.L2527:
+	cmpq	$0, -8(%rbp)
+	jne	.L2530
+	cmpq	$0, -8(%rbp)
+	jne	.L2531
+	movq	16(%rbp), %rdx
+	movq	-8(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -10(%rbp)
+	call	__ctype_b_loc@PLT
+	movq	(%rax), %rax
+	movsbq	-10(%rbp), %rdx
+	addq	%rdx, %rdx
+	addq	%rdx, %rax
+	movzwl	(%rax), %eax
+	movzwl	%ax, %eax
+	andl	$1024, %eax
+	testl	%eax, %eax
+	je	.L2532
+	movb	$1, -11(%rbp)
+	jmp	.L2531
+.L2532:
+	cmpb	$0, -11(%rbp)
+	je	.L2531
+	movq	-8(%rbp), %rax
+	addq	$1, %rax
+	jmp	.L2526
+.L2531:
+	movq	-8(%rbp), %rax
+.L2526:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5115:
+	.size	buffer_index_bw_word, .-buffer_index_bw_word
+	.section	.rodata
+	.align 8
+.LC105:
+	.string	"Using software acceleration: %s\n"
+	.text
+	.globl	app_create
+	.type	app_create, @function
+app_create:
+.LFB5116:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$152, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -152(%rbp)
+	movq	%rsi, -160(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	leaq	-144(%rbp), %rdx
+	movl	$0, %eax
+	movl	$15, %ecx
+	movq	%rdx, %rdi
+	rep stosq
+	movq	-160(%rbp), %rax
+	movq	%rax, %rdi
+	call	window_create
+	movq	%rax, -144(%rbp)
+	movq	%rdx, -136(%rbp)
+	leaq	-64(%rbp), %rax
+	movq	%rax, %rdi
+	call	buffer_create
+	movb	$1, -32(%rbp)
+	movb	$4, -31(%rbp)
+	movq	-144(%rbp), %rax
+	movl	$2, %edx
+	movl	$-1, %esi
+	movq	%rax, %rdi
+	call	SDL_CreateRenderer@PLT
+	movq	%rax, -128(%rbp)
+	movq	-128(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2534
+	call	SDL_GetError@PLT
+	movq	%rax, %rsi
+	leaq	.LC105(%rip), %rax
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	printf@PLT
+	movq	-144(%rbp), %rax
+	movl	$1, %edx
+	movl	$-1, %esi
+	movq	%rax, %rdi
+	call	SDL_CreateRenderer@PLT
+	movq	%rax, -128(%rbp)
+.L2534:
+	movq	-128(%rbp), %rax
+	testq	%rax, %rax
+	jne	.L2535
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2535:
+	movq	-152(%rbp), %rax
+	movq	-144(%rbp), %rcx
+	movq	-136(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	-128(%rbp), %rcx
+	movq	-120(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	-112(%rbp), %rcx
+	movq	-104(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	-96(%rbp), %rcx
+	movq	-88(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	-80(%rbp), %rcx
+	movq	-72(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	-64(%rbp), %rcx
+	movq	-56(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	-48(%rbp), %rcx
+	movq	-40(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	-32(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	movq	-24(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L2537
+	call	__stack_chk_fail@PLT
+.L2537:
+	movq	-152(%rbp), %rax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5116:
+	.size	app_create, .-app_create
+	.globl	app_render_char
+	.type	app_render_char, @function
+app_render_char:
+.LFB5117:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$64, %rsp
+	movl	%edi, %ecx
+	movl	$0, %eax
+	movl	$0, %edx
+	movq	%xmm0, %rax
+	movq	%xmm1, %rdx
+	movq	%rax, -64(%rbp)
+	movq	%rdx, -56(%rbp)
+	movsd	%xmm2, -48(%rbp)
+	movl	%ecx, %eax
+	movb	%al, -36(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movsd	-64(%rbp), %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -32(%rbp)
+	movsd	-56(%rbp), %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -28(%rbp)
+	movl	88(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2539
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2540
+.L2539:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2540:
+	mulsd	-48(%rbp), %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -24(%rbp)
+	movl	92(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2541
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2542
+.L2541:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2542:
+	mulsd	-48(%rbp), %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -20(%rbp)
+	movq	56(%rbp), %rax
+	movsbq	-36(%rbp), %rdx
+	salq	$4, %rdx
+	subq	$512, %rdx
+	leaq	(%rax,%rdx), %rdi
+	movq	48(%rbp), %rsi
+	movq	32(%rbp), %rax
+	leaq	-32(%rbp), %rdx
+	movq	%rdx, %rcx
+	movq	%rdi, %rdx
+	movq	%rax, %rdi
+	call	SDL_RenderCopy@PLT
+	testl	%eax, %eax
+	jns	.L2545
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2545:
+	nop
+	movq	-8(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L2544
+	call	__stack_chk_fail@PLT
+.L2544:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5117:
+	.size	app_render_char, .-app_render_char
+	.globl	app_set_text_color
+	.type	app_set_text_color, @function
+app_set_text_color:
+.LFB5118:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$32, %rsp
+	movl	%edi, -20(%rbp)
+	movl	-20(%rbp), %eax
+	shrl	$24, %eax
+	movb	%al, -4(%rbp)
+	movl	-20(%rbp), %eax
+	shrl	$16, %eax
+	movb	%al, -3(%rbp)
+	movl	-20(%rbp), %eax
+	shrl	$8, %eax
+	movb	%al, -2(%rbp)
+	movl	-20(%rbp), %eax
+	movb	%al, -1(%rbp)
+	movzbl	-2(%rbp), %ecx
+	movzbl	-3(%rbp), %edx
+	movzbl	-4(%rbp), %esi
+	movq	48(%rbp), %rax
+	movq	%rax, %rdi
+	call	SDL_SetTextureColorMod@PLT
+	testl	%eax, %eax
+	jns	.L2547
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2547:
+	movzbl	-1(%rbp), %edx
+	movq	48(%rbp), %rax
+	movl	%edx, %esi
+	movq	%rax, %rdi
+	call	SDL_SetTextureAlphaMod@PLT
+	testl	%eax, %eax
+	jns	.L2549
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2549:
+	nop
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5118:
+	.size	app_set_text_color, .-app_set_text_color
+	.globl	app_get_text_color
+	.type	app_get_text_color, @function
+app_get_text_color:
+.LFB5119:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$16, %rsp
+	movq	%fs:40, %rax
+	movq	%rax, -8(%rbp)
+	xorl	%eax, %eax
+	movb	$0, -12(%rbp)
+	movb	$0, -11(%rbp)
+	movb	$0, -10(%rbp)
+	movb	$0, -9(%rbp)
+	movq	48(%rbp), %rax
+	leaq	-10(%rbp), %rcx
+	leaq	-11(%rbp), %rdx
+	leaq	-12(%rbp), %rsi
+	movq	%rax, %rdi
+	call	SDL_GetTextureColorMod@PLT
+	testl	%eax, %eax
+	jns	.L2551
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2551:
+	movq	48(%rbp), %rax
+	leaq	-9(%rbp), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	SDL_GetTextureAlphaMod@PLT
+	testl	%eax, %eax
+	jns	.L2552
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2552:
+	movzbl	-12(%rbp), %eax
+	movzbl	%al, %eax
+	sall	$24, %eax
+	movl	%eax, %edx
+	movzbl	-11(%rbp), %eax
+	movzbl	%al, %eax
+	sall	$16, %eax
+	orl	%eax, %edx
+	movzbl	-10(%rbp), %eax
+	movzbl	%al, %eax
+	sall	$8, %eax
+	orl	%eax, %edx
+	movzbl	-9(%rbp), %eax
+	movzbl	%al, %eax
+	orl	%edx, %eax
+	movq	-8(%rbp), %rdx
+	subq	%fs:40, %rdx
+	je	.L2554
+	call	__stack_chk_fail@PLT
+.L2554:
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5119:
+	.size	app_get_text_color, .-app_get_text_color
+	.globl	app_render_text
+	.type	app_render_text, @function
+app_render_text:
+.LFB5120:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$88, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -56(%rbp)
+	movq	%rsi, -64(%rbp)
+	movl	$0, %ecx
+	movl	$0, %ebx
+	movq	%xmm0, %rcx
+	movq	%xmm1, %rbx
+	movq	%rcx, -80(%rbp)
+	movq	%rbx, -72(%rbp)
+	movl	%edx, -84(%rbp)
+	movsd	%xmm2, -96(%rbp)
+	movl	-84(%rbp), %edx
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	%edx, %edi
+	call	app_set_text_color
+	subq	$-128, %rsp
+	movq	-80(%rbp), %rax
+	movq	-72(%rbp), %rdx
+	movq	%rax, -32(%rbp)
+	movq	%rdx, -24(%rbp)
+	movq	$0, -40(%rbp)
+	jmp	.L2556
+.L2573:
+	movq	-56(%rbp), %rdx
+	movq	-40(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -42(%rbp)
+	movsd	-32(%rbp), %xmm0
+	movl	28(%rbp), %eax
+	pxor	%xmm1, %xmm1
+	cvtsi2sdl	%eax, %xmm1
+	comisd	%xmm1, %xmm0
+	seta	%al
+	movb	%al, -41(%rbp)
+	movzbl	-41(%rbp), %eax
+	pxor	%xmm1, %xmm1
+	cvtsi2sdl	%eax, %xmm1
+	movsd	-80(%rbp), %xmm0
+	mulsd	%xmm0, %xmm1
+	movzbl	-41(%rbp), %eax
+	xorl	$1, %eax
+	testb	%al, %al
+	je	.L2557
+	movsd	.LC90(%rip), %xmm0
+	jmp	.L2558
+.L2557:
+	pxor	%xmm0, %xmm0
+.L2558:
+	movsd	-32(%rbp), %xmm2
+	mulsd	%xmm2, %xmm0
+	addsd	%xmm1, %xmm0
+	movsd	%xmm0, -32(%rbp)
+	movsd	-24(%rbp), %xmm1
+	movzbl	-41(%rbp), %eax
+	pxor	%xmm2, %xmm2
+	cvtsi2sdl	%eax, %xmm2
+	movl	92(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2559
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2560
+.L2559:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2560:
+	mulsd	-96(%rbp), %xmm0
+	mulsd	%xmm0, %xmm2
+	pxor	%xmm0, %xmm0
+	addsd	%xmm2, %xmm0
+	addsd	%xmm1, %xmm0
+	movsd	%xmm0, -24(%rbp)
+	movsbl	-42(%rbp), %eax
+	cmpl	$9, %eax
+	je	.L2561
+	cmpl	$10, %eax
+	jne	.L2562
+	movq	120(%rbp), %rax
+	cmpq	%rax, -40(%rbp)
+	jne	.L2563
+	movq	-32(%rbp), %rdx
+	movsd	-24(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	$-16711681, %edi
+	movq	%rdx, %xmm0
+	call	app_render_cursor_in_pos
+	subq	$-128, %rsp
+.L2563:
+	movsd	-80(%rbp), %xmm0
+	movsd	%xmm0, -32(%rbp)
+	movsd	-24(%rbp), %xmm1
+	movl	92(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2564
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2565
+.L2564:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2565:
+	mulsd	-96(%rbp), %xmm0
+	addsd	%xmm1, %xmm0
+	movsd	%xmm0, -24(%rbp)
+	jmp	.L2566
+.L2561:
+	movq	120(%rbp), %rax
+	cmpq	%rax, -40(%rbp)
+	jne	.L2567
+	movq	-32(%rbp), %rdx
+	movsd	-24(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	$-16711681, %edi
+	movq	%rdx, %xmm0
+	call	app_render_cursor_in_pos
+	subq	$-128, %rsp
+.L2567:
+	movsd	-32(%rbp), %xmm1
+	movl	88(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2568
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2569
+.L2568:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2569:
+	movapd	%xmm0, %xmm2
+	mulsd	-96(%rbp), %xmm2
+	movzbl	129(%rbp), %eax
+	movzbl	%al, %eax
+	pxor	%xmm0, %xmm0
+	cvtsi2sdl	%eax, %xmm0
+	mulsd	%xmm2, %xmm0
+	addsd	%xmm1, %xmm0
+	movsd	%xmm0, -32(%rbp)
+	jmp	.L2566
+.L2562:
+	movsbl	-42(%rbp), %edx
+	movsd	-96(%rbp), %xmm0
+	movq	-32(%rbp), %rsi
+	movsd	-24(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movapd	%xmm0, %xmm2
+	movq	%rsi, %xmm0
+	movl	%edx, %edi
+	call	app_render_char
+	subq	$-128, %rsp
+	movq	120(%rbp), %rax
+	cmpq	%rax, -40(%rbp)
+	jne	.L2570
+	movq	-32(%rbp), %rdx
+	movsd	-24(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	$-16711681, %edi
+	movq	%rdx, %xmm0
+	call	app_render_cursor_in_pos
+	subq	$-128, %rsp
+.L2570:
+	movsd	-32(%rbp), %xmm1
+	movl	88(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2571
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2572
+.L2571:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2572:
+	mulsd	-96(%rbp), %xmm0
+	addsd	%xmm1, %xmm0
+	movsd	%xmm0, -32(%rbp)
+.L2566:
+	addq	$1, -40(%rbp)
+.L2556:
+	movq	-40(%rbp), %rax
+	cmpq	-64(%rbp), %rax
+	jb	.L2573
+	movq	120(%rbp), %rax
+	cmpq	%rax, -40(%rbp)
+	jne	.L2575
+	movq	-32(%rbp), %rdx
+	movsd	-24(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	$-16711681, %edi
+	movq	%rdx, %xmm0
+	call	app_render_cursor_in_pos
+	subq	$-128, %rsp
+.L2575:
+	nop
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5120:
+	.size	app_render_text, .-app_render_text
+	.globl	app_render_cursor_in_pos
+	.type	app_render_cursor_in_pos, @function
+app_render_cursor_in_pos:
+.LFB5121:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$88, %rsp
+	.cfi_offset 3, -24
+	movl	$0, %eax
+	movl	$0, %edx
+	movq	%xmm0, %rax
+	movq	%xmm1, %rdx
+	movq	%rax, -80(%rbp)
+	movq	%rdx, -72(%rbp)
+	movl	%edi, -84(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movsd	-80(%rbp), %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -48(%rbp)
+	movsd	-72(%rbp), %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -44(%rbp)
+	movl	88(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2577
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2578
+.L2577:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2578:
+	movsd	64(%rbp), %xmm1
+	mulsd	%xmm1, %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -40(%rbp)
+	movl	92(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2579
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2580
+.L2579:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2580:
+	movsd	64(%rbp), %xmm1
+	mulsd	%xmm1, %xmm0
+	cvttsd2sil	%xmm0, %eax
+	movl	%eax, -36(%rbp)
+	movl	-84(%rbp), %eax
+	movzbl	%al, %edi
+	movl	-84(%rbp), %eax
+	shrl	$8, %eax
+	movzbl	%al, %ecx
+	movl	-84(%rbp), %eax
+	shrl	$16, %eax
+	movzbl	%al, %edx
+	movl	-84(%rbp), %eax
+	shrl	$24, %eax
+	movzbl	%al, %esi
+	movq	32(%rbp), %rax
+	movl	%edi, %r8d
+	movq	%rax, %rdi
+	call	SDL_SetRenderDrawColor@PLT
+	testl	%eax, %eax
+	jns	.L2581
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2581:
+	movq	32(%rbp), %rax
+	leaq	-48(%rbp), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	SDL_RenderFillRect@PLT
+	testl	%eax, %eax
+	jns	.L2582
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2582:
+	movq	120(%rbp), %rdx
+	movq	104(%rbp), %rax
+	cmpq	%rax, %rdx
+	jnb	.L2576
+	movq	96(%rbp), %rdx
+	movq	120(%rbp), %rax
+	addq	%rdx, %rax
+	movzbl	(%rax), %eax
+	movb	%al, -53(%rbp)
+	cmpb	$10, -53(%rbp)
+	je	.L2588
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	call	app_get_text_color
+	subq	$-128, %rsp
+	movl	%eax, -52(%rbp)
+	movl	-84(%rbp), %eax
+	notl	%eax
+	orb	$-1, %al
+	movl	%eax, %edx
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	%edx, %edi
+	call	app_set_text_color
+	subq	$-128, %rsp
+	movsd	64(%rbp), %xmm0
+	movsbl	-53(%rbp), %edx
+	movq	-80(%rbp), %rsi
+	movsd	-72(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movapd	%xmm0, %xmm2
+	movq	%rsi, %xmm0
+	movl	%edx, %edi
+	call	app_render_char
+	subq	$-128, %rsp
+	movl	-52(%rbp), %edx
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	%edx, %edi
+	call	app_set_text_color
+	subq	$-128, %rsp
+	jmp	.L2576
+.L2588:
+	nop
+.L2576:
+	movq	-24(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L2587
+	call	__stack_chk_fail@PLT
+.L2587:
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5121:
+	.size	app_render_cursor_in_pos, .-app_render_cursor_in_pos
+	.globl	app_render_cursor
+	.type	app_render_cursor, @function
+app_render_cursor:
+.LFB5122:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$40, %rsp
+	.cfi_offset 3, -24
+	movl	%edi, -36(%rbp)
+	movq	120(%rbp), %rax
+	testq	%rax, %rax
+	js	.L2590
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2591
+.L2590:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2591:
+	movl	88(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2592
+	pxor	%xmm1, %xmm1
+	cvtsi2sdq	%rax, %xmm1
+	jmp	.L2593
+.L2592:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm1, %xmm1
+	cvtsi2sdq	%rdx, %xmm1
+	addsd	%xmm1, %xmm1
+.L2593:
+	movsd	64(%rbp), %xmm2
+	mulsd	%xmm2, %xmm1
+	mulsd	%xmm1, %xmm0
+	movsd	%xmm0, -32(%rbp)
+	movq	120(%rbp), %rax
+	testq	%rax, %rax
+	js	.L2594
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rax, %xmm0
+	jmp	.L2595
+.L2594:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm0, %xmm0
+	cvtsi2sdq	%rdx, %xmm0
+	addsd	%xmm0, %xmm0
+.L2595:
+	movl	92(%rbp), %eax
+	movl	%eax, %eax
+	testq	%rax, %rax
+	js	.L2596
+	pxor	%xmm1, %xmm1
+	cvtsi2sdq	%rax, %xmm1
+	jmp	.L2597
+.L2596:
+	movq	%rax, %rdx
+	shrq	%rdx
+	andl	$1, %eax
+	orq	%rax, %rdx
+	pxor	%xmm1, %xmm1
+	cvtsi2sdq	%rdx, %xmm1
+	addsd	%xmm1, %xmm1
+.L2597:
+	movsd	64(%rbp), %xmm2
+	mulsd	%xmm2, %xmm1
+	mulsd	%xmm1, %xmm0
+	movsd	%xmm0, -24(%rbp)
+	movl	-36(%rbp), %esi
+	movq	-32(%rbp), %rdx
+	movsd	-24(%rbp), %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movl	%esi, %edi
+	movq	%rdx, %xmm0
+	call	app_render_cursor_in_pos
+	subq	$-128, %rsp
+	nop
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5122:
+	.size	app_render_cursor, .-app_render_cursor
+	.section	.rodata
+.LC107:
+	.string	"file->size = %ld\n"
+	.text
+	.globl	file_read_all
+	.type	file_read_all, @function
+file_read_all:
+.LFB5123:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	subq	$96, %rsp
+	movq	%rdi, -88(%rbp)
+	movq	%rsi, -96(%rbp)
+	movl	$0, -72(%rbp)
+	movq	-96(%rbp), %rax
+	movl	$0, %esi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	open@PLT
+	movl	%eax, -68(%rbp)
+	cmpl	$0, -68(%rbp)
+	jns	.L2599
+	movl	$1, -72(%rbp)
+	jmp	.L2600
+.L2599:
+	movl	-68(%rbp), %eax
+	movl	$2, %edx
+	movl	$0, %esi
+	movl	%eax, %edi
+	call	lseek@PLT
+	movq	%rax, -64(%rbp)
+	cmpq	$0, -64(%rbp)
+	jns	.L2601
+	movl	$2, -72(%rbp)
+	jmp	.L2602
+.L2601:
+	movl	-68(%rbp), %eax
+	movl	$0, %edx
+	movl	$0, %esi
+	movl	%eax, %edi
+	call	lseek@PLT
+	movq	-64(%rbp), %rax
+	movq	%rax, %rsi
+	leaq	.LC107(%rip), %rax
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	printf@PLT
+	movq	-64(%rbp), %rax
+	addq	$1, %rax
+	movq	%rax, -56(%rbp)
+	movq	-56(%rbp), %rax
+	movq	%rax, %rsi
+	movl	$1, %edi
+	call	calloc@PLT
+	movq	%rax, -48(%rbp)
+	cmpq	$0, -48(%rbp)
+	jne	.L2603
+	movl	$4, -72(%rbp)
+	jmp	.L2602
+.L2603:
+	movq	-64(%rbp), %rdx
+	movq	-48(%rbp), %rcx
+	movl	-68(%rbp), %eax
+	movq	%rcx, %rsi
+	movl	%eax, %edi
+	call	read@PLT
+	movq	%rax, -40(%rbp)
+	cmpq	$0, -40(%rbp)
+	jns	.L2604
+	movl	$3, -72(%rbp)
+	movq	-48(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	jmp	.L2602
+.L2604:
+	cmpq	$0, -40(%rbp)
+	jne	.L2605
+	movl	$6, -72(%rbp)
+	movq	-48(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	jmp	.L2602
+.L2605:
+	movl	-68(%rbp), %eax
+	movl	%eax, %edi
+	call	close@PLT
+	testl	%eax, %eax
+	je	.L2606
+	movl	$5, -72(%rbp)
+	jmp	.L2600
+.L2606:
+	movq	-88(%rbp), %rax
+	movq	-48(%rbp), %rdx
+	movq	%rdx, (%rax)
+	movq	-88(%rbp), %rax
+	movq	-56(%rbp), %rdx
+	movq	%rdx, 8(%rax)
+	movq	-88(%rbp), %rax
+	movl	-72(%rbp), %edx
+	movl	%edx, 16(%rax)
+	jmp	.L2608
+.L2602:
+	movl	-68(%rbp), %eax
+	movl	%eax, %edi
+	call	close@PLT
+	testl	%eax, %eax
+	je	.L2609
+	movl	$5, -72(%rbp)
+	jmp	.L2600
+.L2609:
+	nop
+.L2600:
+	movq	-88(%rbp), %rax
+	movq	$0, (%rax)
+	movq	-88(%rbp), %rax
+	movq	$0, 8(%rax)
+	movq	-88(%rbp), %rax
+	movl	-72(%rbp), %edx
+	movl	%edx, 16(%rax)
+.L2608:
+	movq	-88(%rbp), %rax
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5123:
+	.size	file_read_all, .-file_read_all
+	.globl	keybind_left
+	.type	keybind_left, @function
+keybind_left:
+.LFB5124:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movq	%rdi, -16(%rbp)
+	movq	%rsi, -24(%rbp)
+	movq	-16(%rbp), %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	movq	-16(%rbp), %rax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5124:
+	.size	keybind_left, .-keybind_left
+	.globl	keybind_right
+	.type	keybind_right, @function
+keybind_right:
+.LFB5125:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	.cfi_offset 3, -24
+	movq	%rdi, -16(%rbp)
+	movq	%rsi, -24(%rbp)
+	movq	-16(%rbp), %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	movq	-16(%rbp), %rax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5125:
+	.size	keybind_right, .-keybind_right
+	.section	.data.rel.ro.local,"aw"
+	.align 32
+	.type	keybinds, @object
+	.size	keybinds, 128849028600
+keybinds:
+	.zero	128849028360
+	.long	0
+	.zero	4
+	.quad	keybind_right
+	.quad	0
+	.zero	96
+	.long	0
+	.zero	4
+	.quad	keybind_left
+	.quad	0
+	.zero	96
+	.section	.rodata
+.LC108:
+	.string	"./charmap-oldschool_white.png"
+.LC109:
+	.string	"./src/main.c"
+.LC110:
+	.string	"ged"
+	.text
+	.globl	main
+	.type	main, @function
+main:
+.LFB5126:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$472, %rsp
+	.cfi_offset 3, -24
+	movl	%edi, -372(%rbp)
+	movq	%rsi, -384(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	movl	$32, %edi
+	call	SDL_Init@PLT
+	testl	%eax, %eax
+	jns	.L2615
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2615:
+	leaq	.LC108(%rip), %rax
+	movq	%rax, -344(%rbp)
+	leaq	.LC109(%rip), %rax
+	movq	%rax, -336(%rbp)
+	leaq	-304(%rbp), %rax
+	leaq	.LC110(%rip), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	app_create
+	movq	-288(%rbp), %rsi
+	leaq	-448(%rbp), %rax
+	movq	-344(%rbp), %rdx
+	movl	$18, %r8d
+	movl	$7, %ecx
+	movq	%rax, %rdi
+	call	font_create
+	movq	-448(%rbp), %rax
+	movq	-440(%rbp), %rdx
+	movq	%rax, -280(%rbp)
+	movq	%rdx, -272(%rbp)
+	movq	-432(%rbp), %rax
+	movq	-424(%rbp), %rdx
+	movq	%rax, -264(%rbp)
+	movq	%rdx, -256(%rbp)
+	movq	-416(%rbp), %rax
+	movq	-408(%rbp), %rdx
+	movq	%rax, -248(%rbp)
+	movq	%rdx, -240(%rbp)
+	movq	-400(%rbp), %rax
+	movq	%rax, -232(%rbp)
+	movsd	.LC111(%rip), %xmm0
+	movsd	%xmm0, -256(%rbp)
+	movq	-224(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	movq	$0, -328(%rbp)
+	movq	$0, -320(%rbp)
+	leaq	-176(%rbp), %rax
+	movq	-336(%rbp), %rdx
+	movq	%rdx, %rsi
+	movq	%rax, %rdi
+	call	file_read_all
+	movq	-176(%rbp), %rax
+	movq	%rax, -328(%rbp)
+	movq	-168(%rbp), %rax
+	movq	%rax, -320(%rbp)
+	movl	-160(%rbp), %eax
+	movl	%eax, -356(%rbp)
+	movq	-328(%rbp), %rax
+	movq	%rax, -224(%rbp)
+	movq	-320(%rbp), %rax
+	subq	$1, %rax
+	movq	%rax, -216(%rbp)
+	movq	-320(%rbp), %rax
+	movq	%rax, -208(%rbp)
+	movq	$0, -352(%rbp)
+	jmp	.L2616
+.L2619:
+	leaq	-304(%rbp), %rax
+	addq	$80, %rax
+	movq	%rax, -312(%rbp)
+	leaq	-176(%rbp), %rdi
+	subq	$8, %rsp
+	movq	-312(%rbp), %rcx
+	subq	$32, %rsp
+	movq	%rsp, %rsi
+	movq	(%rcx), %rax
+	movq	8(%rcx), %rdx
+	movq	%rax, (%rsi)
+	movq	%rdx, 8(%rsi)
+	movq	16(%rcx), %rax
+	movq	24(%rcx), %rdx
+	movq	%rax, 16(%rsi)
+	movq	%rdx, 24(%rsi)
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	-304(%rbp), %rcx
+	movq	-296(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	-288(%rbp), %rcx
+	movq	-280(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	-272(%rbp), %rcx
+	movq	-264(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	-256(%rbp), %rcx
+	movq	-248(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	-240(%rbp), %rcx
+	movq	-232(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	-224(%rbp), %rcx
+	movq	-216(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	-208(%rbp), %rcx
+	movq	-200(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	-192(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	call	handle_events
+	addq	$160, %rsp
+	movq	-176(%rbp), %rax
+	movq	-168(%rbp), %rdx
+	movq	%rax, -304(%rbp)
+	movq	%rdx, -296(%rbp)
+	movq	-160(%rbp), %rax
+	movq	-152(%rbp), %rdx
+	movq	%rax, -288(%rbp)
+	movq	%rdx, -280(%rbp)
+	movq	-144(%rbp), %rax
+	movq	-136(%rbp), %rdx
+	movq	%rax, -272(%rbp)
+	movq	%rdx, -264(%rbp)
+	movq	-128(%rbp), %rax
+	movq	-120(%rbp), %rdx
+	movq	%rax, -256(%rbp)
+	movq	%rdx, -248(%rbp)
+	movq	-112(%rbp), %rax
+	movq	-104(%rbp), %rdx
+	movq	%rax, -240(%rbp)
+	movq	%rdx, -232(%rbp)
+	movq	-96(%rbp), %rax
+	movq	-88(%rbp), %rdx
+	movq	%rax, -224(%rbp)
+	movq	%rdx, -216(%rbp)
+	movq	-80(%rbp), %rax
+	movq	-72(%rbp), %rdx
+	movq	%rax, -208(%rbp)
+	movq	%rdx, -200(%rbp)
+	movq	-64(%rbp), %rax
+	movq	%rax, -192(%rbp)
+	movq	-312(%rbp), %rcx
+	movq	-56(%rbp), %rax
+	movq	-48(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	-40(%rbp), %rax
+	movq	-32(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	movq	-352(%rbp), %rax
+	movzbl	%al, %esi
+	movq	-288(%rbp), %rax
+	movl	$1, %r8d
+	movl	$0, %ecx
+	movl	$0, %edx
+	movq	%rax, %rdi
+	call	SDL_SetRenderDrawColor@PLT
+	testl	%eax, %eax
+	jns	.L2617
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2617:
+	movq	-288(%rbp), %rax
+	movq	%rax, %rdi
+	call	SDL_RenderClear@PLT
+	testl	%eax, %eax
+	jns	.L2618
+	call	SDL_GetError@PLT
+	movq	%rax, %rdx
+	movq	stderr(%rip), %rax
+	leaq	.LC103(%rip), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	movl	$0, %eax
+	call	fprintf@PLT
+	movl	$1, %edi
+	call	exit@PLT
+.L2618:
+	movsd	-256(%rbp), %xmm3
+	movsd	%xmm3, -456(%rbp)
+	pxor	%xmm1, %xmm1
+	movq	.LC106(%rip), %rax
+	movq	%rax, %xmm0
+	call	vec2
+	movl	$0, %eax
+	movl	$0, %edx
+	movq	%xmm0, %rax
+	movq	%xmm1, %rdx
+	movq	-312(%rbp), %rcx
+	movq	8(%rcx), %rsi
+	movq	-312(%rbp), %rcx
+	movq	(%rcx), %rdi
+	movq	%rax, -480(%rbp)
+	movq	%rdx, -472(%rbp)
+	movdqa	-480(%rbp), %xmm0
+	movq	%rdx, %xmm1
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	-304(%rbp), %rcx
+	movq	-296(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	-288(%rbp), %rcx
+	movq	-280(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	-272(%rbp), %rcx
+	movq	-264(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	-256(%rbp), %rcx
+	movq	-248(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	-240(%rbp), %rcx
+	movq	-232(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	-224(%rbp), %rcx
+	movq	-216(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	-208(%rbp), %rcx
+	movq	-200(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	-192(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	movsd	-456(%rbp), %xmm2
+	movl	$-1, %edx
+	call	app_render_text
+	subq	$-128, %rsp
+	movq	-288(%rbp), %rax
+	movq	%rax, %rdi
+	call	SDL_RenderPresent@PLT
+	addq	$1, -352(%rbp)
+.L2616:
+	movzbl	-192(%rbp), %eax
+	testb	%al, %al
+	jne	.L2619
+	movq	-304(%rbp), %rax
+	movq	%rax, %rdi
+	call	SDL_DestroyWindow@PLT
+	movq	-224(%rbp), %rax
+	movq	%rax, %rdi
+	call	free@PLT
+	call	SDL_Quit@PLT
+	movl	$0, %edi
+	call	exit@PLT
+	.cfi_endproc
+.LFE5126:
+	.size	main, .-main
+	.globl	handle_events
+	.type	handle_events, @function
+handle_events:
+.LFB5127:
+	.cfi_startproc
+	pushq	%rbp
+	.cfi_def_cfa_offset 16
+	.cfi_offset 6, -16
+	movq	%rsp, %rbp
+	.cfi_def_cfa_register 6
+	pushq	%rbx
+	subq	$568, %rsp
+	.cfi_offset 3, -24
+	movq	%rdi, -440(%rbp)
+	movq	%fs:40, %rax
+	movq	%rax, -24(%rbp)
+	xorl	%eax, %eax
+	pxor	%xmm0, %xmm0
+	movaps	%xmm0, -208(%rbp)
+	movaps	%xmm0, -192(%rbp)
+	movaps	%xmm0, -176(%rbp)
+	movq	%xmm0, -160(%rbp)
+	jmp	.L2622
+.L2635:
+	movl	-208(%rbp), %eax
+	cmpl	$771, %eax
+	je	.L2623
+	cmpl	$771, %eax
+	ja	.L2622
+	cmpl	$768, %eax
+	je	.L2624
+	cmpl	$768, %eax
+	ja	.L2622
+	cmpl	$256, %eax
+	je	.L2625
+	cmpl	$512, %eax
+	je	.L2626
+	jmp	.L2622
+.L2625:
+	movb	$0, 128(%rbp)
+	jmp	.L2622
+.L2624:
+	movq	-208(%rbp), %rax
+	movq	-200(%rbp), %rdx
+	movq	%rax, -368(%rbp)
+	movq	%rdx, -360(%rbp)
+	movq	-192(%rbp), %rax
+	movq	-184(%rbp), %rdx
+	movq	%rax, -352(%rbp)
+	movq	%rdx, -344(%rbp)
+	movq	-352(%rbp), %rax
+	movq	-344(%rbp), %rdx
+	movq	%rax, -416(%rbp)
+	movq	%rdx, -408(%rbp)
+	movl	-412(%rbp), %eax
+	movslq	%eax, %rdx
+	movq	%rdx, %rax
+	salq	$4, %rax
+	subq	%rdx, %rax
+	salq	$3, %rax
+	movq	%rax, %rdx
+	leaq	keybinds(%rip), %rax
+	movq	(%rdx,%rax), %rcx
+	movq	8(%rdx,%rax), %rbx
+	movq	%rcx, -144(%rbp)
+	movq	%rbx, -136(%rbp)
+	movq	16(%rdx,%rax), %rcx
+	movq	24(%rdx,%rax), %rbx
+	movq	%rcx, -128(%rbp)
+	movq	%rbx, -120(%rbp)
+	movq	32(%rdx,%rax), %rcx
+	movq	40(%rdx,%rax), %rbx
+	movq	%rcx, -112(%rbp)
+	movq	%rbx, -104(%rbp)
+	movq	48(%rdx,%rax), %rcx
+	movq	56(%rdx,%rax), %rbx
+	movq	%rcx, -96(%rbp)
+	movq	%rbx, -88(%rbp)
+	movq	64(%rdx,%rax), %rcx
+	movq	72(%rdx,%rax), %rbx
+	movq	%rcx, -80(%rbp)
+	movq	%rbx, -72(%rbp)
+	movq	80(%rdx,%rax), %rcx
+	movq	88(%rdx,%rax), %rbx
+	movq	%rcx, -64(%rbp)
+	movq	%rbx, -56(%rbp)
+	movq	96(%rdx,%rax), %rcx
+	movq	104(%rdx,%rax), %rbx
+	movq	%rcx, -48(%rbp)
+	movq	%rbx, -40(%rbp)
+	movq	112(%rdx,%rax), %rax
+	movq	%rax, -32(%rbp)
+	movq	$0, -432(%rbp)
+	jmp	.L2627
+.L2632:
+	movq	-432(%rbp), %rdx
+	movq	%rdx, %rax
+	addq	%rax, %rax
+	addq	%rdx, %rax
+	salq	$3, %rax
+	leaq	-16(%rax), %rax
+	addq	%rbp, %rax
+	leaq	-128(%rax), %rcx
+	movq	(%rcx), %rax
+	movq	8(%rcx), %rdx
+	movq	%rax, -400(%rbp)
+	movq	%rdx, -392(%rbp)
+	movq	16(%rcx), %rax
+	movq	%rax, -384(%rbp)
+	movq	-392(%rbp), %rax
+	testq	%rax, %rax
+	je	.L2638
+	movl	-400(%rbp), %edx
+	movzwl	-408(%rbp), %eax
+	movzwl	%ax, %eax
+	cmpl	%eax, %edx
+	jne	.L2639
+	movq	-392(%rbp), %r8
+	movq	-384(%rbp), %rsi
+	leaq	-576(%rbp), %rdx
+	subq	$8, %rsp
+	subq	$120, %rsp
+	movq	%rsp, %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rcx
+	movq	%rcx, 112(%rax)
+	movq	%rdx, %rdi
+	call	*%r8
+	subq	$-128, %rsp
+	movq	-576(%rbp), %rax
+	movq	-568(%rbp), %rdx
+	movq	%rax, 16(%rbp)
+	movq	%rdx, 24(%rbp)
+	movq	-560(%rbp), %rax
+	movq	-552(%rbp), %rdx
+	movq	%rax, 32(%rbp)
+	movq	%rdx, 40(%rbp)
+	movq	-544(%rbp), %rax
+	movq	-536(%rbp), %rdx
+	movq	%rax, 48(%rbp)
+	movq	%rdx, 56(%rbp)
+	movq	-528(%rbp), %rax
+	movq	-520(%rbp), %rdx
+	movq	%rax, 64(%rbp)
+	movq	%rdx, 72(%rbp)
+	movq	-512(%rbp), %rax
+	movq	-504(%rbp), %rdx
+	movq	%rax, 80(%rbp)
+	movq	%rdx, 88(%rbp)
+	movq	-496(%rbp), %rax
+	movq	-488(%rbp), %rdx
+	movq	%rax, 96(%rbp)
+	movq	%rdx, 104(%rbp)
+	movq	-480(%rbp), %rax
+	movq	-472(%rbp), %rdx
+	movq	%rax, 112(%rbp)
+	movq	%rdx, 120(%rbp)
+	movq	-464(%rbp), %rax
+	movq	%rax, 128(%rbp)
+	jmp	.L2631
+.L2638:
+	nop
+	jmp	.L2631
+.L2639:
+	nop
+.L2631:
+	addq	$1, -432(%rbp)
+.L2627:
+	cmpq	$4, -432(%rbp)
+	jbe	.L2632
+	jmp	.L2622
+.L2623:
+	leaq	-208(%rbp), %rax
+	addq	$12, %rax
+	movq	%rax, -424(%rbp)
+	jmp	.L2633
+.L2634:
+	movq	-424(%rbp), %rax
+	leaq	1(%rax), %rdx
+	movq	%rdx, -424(%rbp)
+	movzbl	(%rax), %eax
+	movsbl	%al, %r8d
+	movq	160(%rbp), %rsi
+	leaq	136(%rbp), %rdi
+	subq	$32, %rsp
+	movq	%rsp, %rcx
+	movq	136(%rbp), %rax
+	movq	144(%rbp), %rdx
+	movq	%rax, (%rcx)
+	movq	%rdx, 8(%rcx)
+	movq	152(%rbp), %rax
+	movq	160(%rbp), %rdx
+	movq	%rax, 16(%rcx)
+	movq	%rdx, 24(%rcx)
+	movl	%r8d, %edx
+	call	buffer_insert_char
+	addq	$32, %rsp
+.L2633:
+	movq	-424(%rbp), %rax
+	movzbl	(%rax), %eax
+	testb	%al, %al
+	jne	.L2634
+	jmp	.L2622
+.L2626:
+	movq	16(%rbp), %rax
+	leaq	24(%rbp), %rdx
+	leaq	28(%rbp), %rcx
+	movq	%rcx, %rsi
+	movq	%rax, %rdi
+	call	SDL_GetWindowSize@PLT
+	nop
+.L2622:
+	leaq	-208(%rbp), %rax
+	movq	%rax, %rdi
+	call	SDL_PollEvent@PLT
+	testl	%eax, %eax
+	jne	.L2635
+	movq	-440(%rbp), %rax
+	movq	16(%rbp), %rcx
+	movq	24(%rbp), %rbx
+	movq	%rcx, (%rax)
+	movq	%rbx, 8(%rax)
+	movq	32(%rbp), %rcx
+	movq	40(%rbp), %rbx
+	movq	%rcx, 16(%rax)
+	movq	%rbx, 24(%rax)
+	movq	48(%rbp), %rcx
+	movq	56(%rbp), %rbx
+	movq	%rcx, 32(%rax)
+	movq	%rbx, 40(%rax)
+	movq	64(%rbp), %rcx
+	movq	72(%rbp), %rbx
+	movq	%rcx, 48(%rax)
+	movq	%rbx, 56(%rax)
+	movq	80(%rbp), %rcx
+	movq	88(%rbp), %rbx
+	movq	%rcx, 64(%rax)
+	movq	%rbx, 72(%rax)
+	movq	96(%rbp), %rcx
+	movq	104(%rbp), %rbx
+	movq	%rcx, 80(%rax)
+	movq	%rbx, 88(%rax)
+	movq	112(%rbp), %rcx
+	movq	120(%rbp), %rbx
+	movq	%rcx, 96(%rax)
+	movq	%rbx, 104(%rax)
+	movq	128(%rbp), %rdx
+	movq	%rdx, 112(%rax)
+	movq	-440(%rbp), %rcx
+	movq	136(%rbp), %rax
+	movq	144(%rbp), %rdx
+	movq	%rax, 120(%rcx)
+	movq	%rdx, 128(%rcx)
+	movq	152(%rbp), %rax
+	movq	160(%rbp), %rdx
+	movq	%rax, 136(%rcx)
+	movq	%rdx, 144(%rcx)
+	movq	-24(%rbp), %rax
+	subq	%fs:40, %rax
+	je	.L2637
+	call	__stack_chk_fail@PLT
+.L2637:
+	movq	-440(%rbp), %rax
+	movq	-8(%rbp), %rbx
+	leave
+	.cfi_def_cfa 7, 8
+	ret
+	.cfi_endproc
+.LFE5127:
+	.size	handle_events, .-handle_events
+	.section	.rodata
+	.align 32
+	.type	__PRETTY_FUNCTION__.20, @object
+	.size	__PRETTY_FUNCTION__.20, 32
+__PRETTY_FUNCTION__.20:
+	.string	"stbi__load_and_postprocess_8bit"
+	.align 32
+	.type	__PRETTY_FUNCTION__.19, @object
+	.size	__PRETTY_FUNCTION__.19, 33
+__PRETTY_FUNCTION__.19:
+	.string	"stbi__load_and_postprocess_16bit"
+	.align 16
+	.type	__PRETTY_FUNCTION__.18, @object
+	.size	__PRETTY_FUNCTION__.18, 21
+__PRETTY_FUNCTION__.18:
+	.string	"stbi__convert_format"
+	.align 16
+	.type	__PRETTY_FUNCTION__.17, @object
+	.size	__PRETTY_FUNCTION__.17, 23
+__PRETTY_FUNCTION__.17:
+	.string	"stbi__convert_format16"
+	.align 16
+	.type	__PRETTY_FUNCTION__.16, @object
+	.size	__PRETTY_FUNCTION__.16, 23
+__PRETTY_FUNCTION__.16:
+	.string	"stbi__jpeg_huff_decode"
+	.type	tag.15, @object
+	.size	tag.15, 5
+tag.15:
+	.string	"JFIF"
+	.type	tag.14, @object
+	.size	tag.14, 6
+tag.14:
+	.string	"Adobe"
+	.type	rgb.13, @object
+	.size	rgb.13, 3
+rgb.13:
+	.ascii	"RGB"
+	.align 16
+	.type	__PRETTY_FUNCTION__.12, @object
+	.size	__PRETTY_FUNCTION__.12, 18
+__PRETTY_FUNCTION__.12:
+	.string	"stbi__bit_reverse"
+	.align 16
+	.type	length_dezigzag.11, @object
+	.size	length_dezigzag.11, 19
+length_dezigzag.11:
+	.string	"\020\021\022"
+	.ascii	"\b\007\t\006\n\005\013\004\f\003\r\002\016\001\017"
+	.align 8
+	.type	png_sig.10, @object
+	.size	png_sig.10, 8
+png_sig.10:
+	.ascii	"\211PNG\r\n\032\n"
+	.align 16
+	.type	__PRETTY_FUNCTION__.9, @object
+	.size	__PRETTY_FUNCTION__.9, 31
+__PRETTY_FUNCTION__.9:
+	.string	"stbi__create_png_alpha_expand8"
+	.align 16
+	.type	__PRETTY_FUNCTION__.8, @object
+	.size	__PRETTY_FUNCTION__.8, 27
+__PRETTY_FUNCTION__.8:
+	.string	"stbi__create_png_image_raw"
+	.align 16
+	.type	__PRETTY_FUNCTION__.7, @object
+	.size	__PRETTY_FUNCTION__.7, 27
+__PRETTY_FUNCTION__.7:
+	.string	"stbi__compute_transparency"
+	.align 16
+	.type	__PRETTY_FUNCTION__.6, @object
+	.size	__PRETTY_FUNCTION__.6, 29
+__PRETTY_FUNCTION__.6:
+	.string	"stbi__compute_transparency16"
+	.align 16
+	.type	__PRETTY_FUNCTION__.5, @object
+	.size	__PRETTY_FUNCTION__.5, 16
+__PRETTY_FUNCTION__.5:
+	.string	"stbi__de_iphone"
+	.data
+	.align 16
+	.type	invalid_chunk.4, @object
+	.size	invalid_chunk.4, 25
+invalid_chunk.4:
+	.string	"XXXX PNG chunk not known"
+	.section	.rodata
+	.align 16
+	.type	__PRETTY_FUNCTION__.3, @object
+	.size	__PRETTY_FUNCTION__.3, 18
+__PRETTY_FUNCTION__.3:
+	.string	"stbi__shiftsigned"
+	.data
+	.align 32
+	.type	mul_table.2, @object
+	.size	mul_table.2, 36
+mul_table.2:
+	.long	0
+	.long	255
+	.long	85
+	.long	73
+	.long	17
+	.long	33
+	.long	65
+	.long	129
+	.long	1
+	.align 32
+	.type	shift_table.1, @object
+	.size	shift_table.1, 36
+shift_table.1:
+	.long	0
+	.long	0
+	.long	0
+	.long	1
+	.long	0
+	.long	2
+	.long	4
+	.long	6
+	.long	0
+	.section	.rodata
+	.align 8
+	.type	__PRETTY_FUNCTION__.0, @object
+	.size	__PRETTY_FUNCTION__.0, 15
+__PRETTY_FUNCTION__.0:
+	.string	"stbi__tga_load"
+	.align 4
+.LC3:
+	.long	1065353216
+	.align 4
+.LC6:
+	.long	1132396544
+	.align 4
+.LC7:
+	.long	1056964608
+	.align 4
+.LC83:
+	.long	1199570688
+	.align 8
+.LC90:
+	.long	0
+	.long	1072693248
+	.align 4
+.LC91:
+	.long	1077936128
+	.align 8
+.LC106:
+	.long	0
+	.long	0
+	.align 8
+.LC111:
+	.long	0
+	.long	1073741824
+	.ident	"GCC: (Gentoo 13.2.1_p20240210 p14) 13.2.1 20240210"
+	.section	.note.GNU-stack,"",@progbits

+ 1 - 0
t.c

@@ -0,0 +1 @@
+extern char *tzname[2]; void main(void) {tzset(); long int t = time(0); int h = ((t / 3600) + (atoi(tzname[0]))) % 24; int m = (t / 60) % 60; int s = t % 60; printf("%d:%d:%d\n", h, m, s);}