From e15daed8c0b6fc2fb821bace0493925cdb9e30cd Mon Sep 17 00:00:00 2001 From: lolo859 Date: Tue, 31 Mar 2026 22:15:00 +0200 Subject: [PATCH] First commit, Vystem v0.1 --- Blastproof/blastproof.dsc | 31 + Blastproof/bootanim/bootanim.cpp | 271 + Blastproof/bootanim/logo.png | Bin 0 -> 5620 bytes Blastproof/bootanim/stb_image.h | 7988 ++++++++++ Blastproof/config/bp_template.conf | 48 + Blastproof/fontgen/chars/0x21.png | Bin 0 -> 105 bytes Blastproof/fontgen/chars/0x22.png | Bin 0 -> 105 bytes Blastproof/fontgen/chars/0x23.png | Bin 0 -> 117 bytes Blastproof/fontgen/chars/0x24.png | Bin 0 -> 158 bytes Blastproof/fontgen/chars/0x25.png | Bin 0 -> 159 bytes Blastproof/fontgen/chars/0x26.png | Bin 0 -> 150 bytes Blastproof/fontgen/chars/0x27.png | Bin 0 -> 97 bytes Blastproof/fontgen/chars/0x28.png | Bin 0 -> 126 bytes Blastproof/fontgen/chars/0x29.png | Bin 0 -> 128 bytes Blastproof/fontgen/chars/0x2A.png | Bin 0 -> 134 bytes Blastproof/fontgen/chars/0x2B.png | Bin 0 -> 114 bytes Blastproof/fontgen/chars/0x2C.png | Bin 0 -> 102 bytes Blastproof/fontgen/chars/0x2D.png | Bin 0 -> 97 bytes Blastproof/fontgen/chars/0x2E.png | Bin 0 -> 93 bytes Blastproof/fontgen/chars/0x2F.png | Bin 0 -> 141 bytes Blastproof/fontgen/chars/0x30.png | Bin 0 -> 147 bytes Blastproof/fontgen/chars/0x31.png | Bin 0 -> 127 bytes Blastproof/fontgen/chars/0x32.png | Bin 0 -> 146 bytes Blastproof/fontgen/chars/0x33.png | Bin 0 -> 157 bytes Blastproof/fontgen/chars/0x34.png | Bin 0 -> 138 bytes Blastproof/fontgen/chars/0x35.png | Bin 0 -> 147 bytes Blastproof/fontgen/chars/0x36.png | Bin 0 -> 151 bytes Blastproof/fontgen/chars/0x37.png | Bin 0 -> 146 bytes Blastproof/fontgen/chars/0x38.png | Bin 0 -> 134 bytes Blastproof/fontgen/chars/0x39.png | Bin 0 -> 149 bytes Blastproof/fontgen/chars/0x3A.png | Bin 0 -> 96 bytes Blastproof/fontgen/chars/0x3B.png | Bin 0 -> 107 bytes Blastproof/fontgen/chars/0x3C.png | Bin 0 -> 137 bytes Blastproof/fontgen/chars/0x3D.png | Bin 0 -> 97 bytes 
Blastproof/fontgen/chars/0x3E.png | Bin 0 -> 143 bytes Blastproof/fontgen/chars/0x3F.png | Bin 0 -> 144 bytes Blastproof/fontgen/chars/0x40.png | Bin 0 -> 145 bytes Blastproof/fontgen/chars/0x41.png | Bin 0 -> 138 bytes Blastproof/fontgen/chars/0x42.png | Bin 0 -> 139 bytes Blastproof/fontgen/chars/0x43.png | Bin 0 -> 143 bytes Blastproof/fontgen/chars/0x44.png | Bin 0 -> 135 bytes Blastproof/fontgen/chars/0x45.png | Bin 0 -> 123 bytes Blastproof/fontgen/chars/0x46.png | Bin 0 -> 119 bytes Blastproof/fontgen/chars/0x47.png | Bin 0 -> 150 bytes Blastproof/fontgen/chars/0x48.png | Bin 0 -> 123 bytes Blastproof/fontgen/chars/0x49.png | Bin 0 -> 120 bytes Blastproof/fontgen/chars/0x4A.png | Bin 0 -> 135 bytes Blastproof/fontgen/chars/0x4B.png | Bin 0 -> 145 bytes Blastproof/fontgen/chars/0x4C.png | Bin 0 -> 108 bytes Blastproof/fontgen/chars/0x4D.png | Bin 0 -> 132 bytes Blastproof/fontgen/chars/0x4E.png | Bin 0 -> 141 bytes Blastproof/fontgen/chars/0x4F.png | Bin 0 -> 135 bytes Blastproof/fontgen/chars/0x50.png | Bin 0 -> 133 bytes Blastproof/fontgen/chars/0x51.png | Bin 0 -> 146 bytes Blastproof/fontgen/chars/0x52.png | Bin 0 -> 151 bytes Blastproof/fontgen/chars/0x53.png | Bin 0 -> 150 bytes Blastproof/fontgen/chars/0x54.png | Bin 0 -> 113 bytes Blastproof/fontgen/chars/0x55.png | Bin 0 -> 124 bytes Blastproof/fontgen/chars/0x56.png | Bin 0 -> 137 bytes Blastproof/fontgen/chars/0x57.png | Bin 0 -> 137 bytes Blastproof/fontgen/chars/0x58.png | Bin 0 -> 151 bytes Blastproof/fontgen/chars/0x59.png | Bin 0 -> 141 bytes Blastproof/fontgen/chars/0x5A.png | Bin 0 -> 137 bytes Blastproof/fontgen/chars/0x5B.png | Bin 0 -> 121 bytes Blastproof/fontgen/chars/0x5C.png | Bin 0 -> 143 bytes Blastproof/fontgen/chars/0x5D.png | Bin 0 -> 120 bytes Blastproof/fontgen/chars/0x5E.png | Bin 0 -> 123 bytes Blastproof/fontgen/chars/0x5F.png | Bin 0 -> 94 bytes Blastproof/fontgen/chars/0x60.png | Bin 0 -> 118 bytes Blastproof/fontgen/chars/0x61.png | Bin 0 -> 129 bytes 
Blastproof/fontgen/chars/0x62.png | Bin 0 -> 145 bytes Blastproof/fontgen/chars/0x63.png | Bin 0 -> 145 bytes Blastproof/fontgen/chars/0x64.png | Bin 0 -> 145 bytes Blastproof/fontgen/chars/0x65.png | Bin 0 -> 144 bytes Blastproof/fontgen/chars/0x66.png | Bin 0 -> 129 bytes Blastproof/fontgen/chars/0x67.png | Bin 0 -> 154 bytes Blastproof/fontgen/chars/0x68.png | Bin 0 -> 132 bytes Blastproof/fontgen/chars/0x69.png | Bin 0 -> 101 bytes Blastproof/fontgen/chars/0x6A.png | Bin 0 -> 126 bytes Blastproof/fontgen/chars/0x6B.png | Bin 0 -> 136 bytes Blastproof/fontgen/chars/0x6C.png | Bin 0 -> 97 bytes Blastproof/fontgen/chars/0x6D.png | Bin 0 -> 115 bytes Blastproof/fontgen/chars/0x6E.png | Bin 0 -> 125 bytes Blastproof/fontgen/chars/0x6F.png | Bin 0 -> 137 bytes Blastproof/fontgen/chars/0x70.png | Bin 0 -> 141 bytes Blastproof/fontgen/chars/0x71.png | Bin 0 -> 140 bytes Blastproof/fontgen/chars/0x72.png | Bin 0 -> 125 bytes Blastproof/fontgen/chars/0x73.png | Bin 0 -> 138 bytes Blastproof/fontgen/chars/0x74.png | Bin 0 -> 128 bytes Blastproof/fontgen/chars/0x75.png | Bin 0 -> 128 bytes Blastproof/fontgen/chars/0x76.png | Bin 0 -> 138 bytes Blastproof/fontgen/chars/0x77.png | Bin 0 -> 134 bytes Blastproof/fontgen/chars/0x78.png | Bin 0 -> 136 bytes Blastproof/fontgen/chars/0x79.png | Bin 0 -> 151 bytes Blastproof/fontgen/chars/0x7A.png | Bin 0 -> 127 bytes Blastproof/fontgen/chars/0x7B.png | Bin 0 -> 136 bytes Blastproof/fontgen/chars/0x7C.png | Bin 0 -> 91 bytes Blastproof/fontgen/chars/0x7D.png | Bin 0 -> 140 bytes Blastproof/fontgen/chars/0x7E.png | Bin 0 -> 126 bytes Blastproof/fontgen/chars/0xFF.png | Bin 0 -> 330 bytes Blastproof/fontgen/fontgen.cpp | 259 + Blastproof/fontgen/stb_image.h | 7988 ++++++++++ Blastproof/initfsgen/address.c | 104 + Blastproof/initfsgen/address.h | 51 + Blastproof/initfsgen/api.h | 77 + Blastproof/initfsgen/build.sh | 4 + Blastproof/initfsgen/context.h | 28 + Blastproof/initfsgen/fors.c | 161 + Blastproof/initfsgen/fors.h | 32 + 
Blastproof/initfsgen/hash.h | 27 + Blastproof/initfsgen/hash_sha2.c | 197 + Blastproof/initfsgen/initfsgen.cpp | 241 + Blastproof/initfsgen/merkle.c | 61 + Blastproof/initfsgen/merkle.h | 18 + Blastproof/initfsgen/params.h | 3 + .../params/params-sphincs-sha2-256f.h | 85 + Blastproof/initfsgen/randombytes.c | 43 + Blastproof/initfsgen/randombytes.h | 6 + Blastproof/initfsgen/sha2.c | 700 + Blastproof/initfsgen/sha2.h | 43 + Blastproof/initfsgen/sha2_offsets.h | 20 + Blastproof/initfsgen/sha3.c | 190 + Blastproof/initfsgen/sha3.h | 46 + Blastproof/initfsgen/sign.c | 287 + Blastproof/initfsgen/thash.h | 13 + Blastproof/initfsgen/thash_sha2_robust.c | 74 + Blastproof/initfsgen/thash_sha2_simple.c | 59 + Blastproof/initfsgen/utils.c | 154 + Blastproof/initfsgen/utils.h | 64 + Blastproof/initfsgen/utilsx1.c | 100 + Blastproof/initfsgen/utilsx1.h | 26 + Blastproof/initfsgen/wots.c | 112 + Blastproof/initfsgen/wots.h | 25 + Blastproof/initfsgen/wotsx1.c | 73 + Blastproof/initfsgen/wotsx1.h | 36 + Blastproof/keygen/address.c | 104 + Blastproof/keygen/address.h | 51 + Blastproof/keygen/address.o | Bin 0 -> 2784 bytes Blastproof/keygen/api.h | 77 + Blastproof/keygen/argon2.h | 437 + Blastproof/keygen/argon2/Argon2.sln | 158 + Blastproof/keygen/argon2/CHANGELOG.md | 32 + Blastproof/keygen/argon2/LICENSE | 314 + Blastproof/keygen/argon2/Makefile | 255 + Blastproof/keygen/argon2/Package.swift | 46 + Blastproof/keygen/argon2/README.md | 303 + Blastproof/keygen/argon2/argon2 | Bin 0 -> 221152 bytes Blastproof/keygen/argon2/argon2-specs.pdf | Bin 0 -> 459608 bytes Blastproof/keygen/argon2/kats/argon2d | 12304 ++++++++++++++++ Blastproof/keygen/argon2/kats/argon2d.shasum | 1 + Blastproof/keygen/argon2/kats/argon2d_v16 | 12304 ++++++++++++++++ .../keygen/argon2/kats/argon2d_v16.shasum | 1 + Blastproof/keygen/argon2/kats/argon2i | 12304 ++++++++++++++++ Blastproof/keygen/argon2/kats/argon2i.shasum | 1 + Blastproof/keygen/argon2/kats/argon2i_v16 | 12304 ++++++++++++++++ 
.../keygen/argon2/kats/argon2i_v16.shasum | 1 + Blastproof/keygen/argon2/kats/argon2id | 12304 ++++++++++++++++ Blastproof/keygen/argon2/kats/argon2id.shasum | 1 + Blastproof/keygen/argon2/kats/argon2id_v16 | 12304 ++++++++++++++++ .../keygen/argon2/kats/argon2id_v16.shasum | 1 + Blastproof/keygen/argon2/kats/check-sums.ps1 | 42 + Blastproof/keygen/argon2/kats/check-sums.sh | 13 + Blastproof/keygen/argon2/kats/test.ps1 | 50 + Blastproof/keygen/argon2/kats/test.sh | 49 + Blastproof/keygen/argon2/libargon2.pc | 13 + Blastproof/keygen/argon2/libargon2.pc.in | 18 + Blastproof/keygen/argon2/libargon2.so.1 | Bin 0 -> 177160 bytes Blastproof/keygen/argon2/man/argon2.1 | 57 + Blastproof/keygen/argon2/src/argon2.c | 452 + Blastproof/keygen/argon2/src/argon2.o | Bin 0 -> 59208 bytes Blastproof/keygen/argon2/src/bench.c | 111 + .../keygen/argon2/src/blake2/blake2-impl.h | 156 + Blastproof/keygen/argon2/src/blake2/blake2.h | 89 + Blastproof/keygen/argon2/src/blake2/blake2b.c | 390 + Blastproof/keygen/argon2/src/blake2/blake2b.o | Bin 0 -> 41456 bytes .../argon2/src/blake2/blamka-round-opt.h | 471 + .../argon2/src/blake2/blamka-round-ref.h | 56 + Blastproof/keygen/argon2/src/core.c | 648 + Blastproof/keygen/argon2/src/core.h | 228 + Blastproof/keygen/argon2/src/core.o | Bin 0 -> 37976 bytes Blastproof/keygen/argon2/src/encoding.c | 463 + Blastproof/keygen/argon2/src/encoding.h | 57 + Blastproof/keygen/argon2/src/encoding.o | Bin 0 -> 25064 bytes Blastproof/keygen/argon2/src/genkat.c | 213 + Blastproof/keygen/argon2/src/genkat.h | 51 + Blastproof/keygen/argon2/src/opt.c | 283 + Blastproof/keygen/argon2/src/opt.o | Bin 0 -> 75600 bytes Blastproof/keygen/argon2/src/ref.c | 194 + Blastproof/keygen/argon2/src/run.c | 337 + Blastproof/keygen/argon2/src/test.c | 289 + Blastproof/keygen/argon2/src/thread.c | 57 + Blastproof/keygen/argon2/src/thread.h | 67 + Blastproof/keygen/argon2/src/thread.o | Bin 0 -> 5728 bytes .../argon2/vs2015/Argon2Opt/Argon2Opt.vcxproj | 231 + 
.../Argon2Opt/Argon2Opt.vcxproj.filters | 69 + .../Argon2OptBench/Argon2OptBench.vcxproj | 231 + .../Argon2OptBench.vcxproj.filters | 69 + .../vs2015/Argon2OptDll/Argon2OptDll.vcxproj | 230 + .../Argon2OptDll/Argon2OptDll.vcxproj.filters | 66 + .../Argon2OptGenKAT/Argon2OptGenKAT.vcxproj | 244 + .../Argon2OptGenKAT.vcxproj.filters | 72 + .../Argon2OptTestCI/Argon2OptTestCI.vcxproj | 235 + .../Argon2OptTestCI.vcxproj.filters | 69 + .../argon2/vs2015/Argon2Ref/Argon2Ref.vcxproj | 243 + .../Argon2Ref/Argon2Ref.vcxproj.filters | 69 + .../Argon2RefBench/Argon2RefBench.vcxproj | 231 + .../Argon2RefBench.vcxproj.filters | 69 + .../vs2015/Argon2RefDll/Argon2RefDll.vcxproj | 230 + .../Argon2RefDll/Argon2RefDll.vcxproj.filters | 66 + .../Argon2RefGenKAT/Argon2RefGenKAT.vcxproj | 232 + .../Argon2RefGenKAT.vcxproj.filters | 72 + .../Argon2RefTestCI/Argon2RefTestCI.vcxproj | 231 + .../Argon2RefTestCI.vcxproj.filters | 69 + Blastproof/keygen/build.sh | 3 + Blastproof/keygen/context.h | 28 + Blastproof/keygen/fors.c | 161 + Blastproof/keygen/fors.h | 32 + Blastproof/keygen/fors.o | Bin 0 -> 4320 bytes Blastproof/keygen/hash.h | 27 + Blastproof/keygen/hash_sha2.c | 197 + Blastproof/keygen/hash_sha2.o | Bin 0 -> 4400 bytes Blastproof/keygen/keygen.cpp | 372 + Blastproof/keygen/merkle.c | 61 + Blastproof/keygen/merkle.h | 18 + Blastproof/keygen/merkle.o | Bin 0 -> 2584 bytes Blastproof/keygen/params.h | 3 + .../keygen/params/params-sphincs-sha2-256f.h | 85 + Blastproof/keygen/randombytes.c | 43 + Blastproof/keygen/randombytes.h | 6 + Blastproof/keygen/randombytes.o | Bin 0 -> 1952 bytes Blastproof/keygen/sha2.c | 700 + Blastproof/keygen/sha2.h | 43 + Blastproof/keygen/sha2.o | Bin 0 -> 61808 bytes Blastproof/keygen/sha2_offsets.h | 20 + Blastproof/keygen/sha3.c | 190 + Blastproof/keygen/sha3.h | 46 + Blastproof/keygen/sha3.o | Bin 0 -> 4896 bytes Blastproof/keygen/sign.c | 287 + Blastproof/keygen/sign.o | Bin 0 -> 6816 bytes Blastproof/keygen/thash.h | 13 + 
Blastproof/keygen/thash_sha2_robust.c | 74 + Blastproof/keygen/thash_sha2_robust.o | Bin 0 -> 3080 bytes Blastproof/keygen/thash_sha2_simple.c | 59 + Blastproof/keygen/utils.c | 154 + Blastproof/keygen/utils.h | 64 + Blastproof/keygen/utils.o | Bin 0 -> 3960 bytes Blastproof/keygen/utilsx1.c | 100 + Blastproof/keygen/utilsx1.h | 26 + Blastproof/keygen/utilsx1.o | Bin 0 -> 2384 bytes Blastproof/keygen/wots.c | 112 + Blastproof/keygen/wots.h | 25 + Blastproof/keygen/wots.o | Bin 0 -> 2864 bytes Blastproof/keygen/wotsx1.c | 73 + Blastproof/keygen/wotsx1.h | 36 + Blastproof/keygen/wotsx1.o | Bin 0 -> 2528 bytes Blastproof/src/keycard.asm | 12 + Blastproof/src/libs/argon2/argon2.c | 456 + Blastproof/src/libs/argon2/argon2.h | 447 + Blastproof/src/libs/argon2/blake2-impl.h | 158 + Blastproof/src/libs/argon2/blake2.h | 97 + Blastproof/src/libs/argon2/blake2b.c | 398 + Blastproof/src/libs/argon2/blamka-round-opt.h | 475 + Blastproof/src/libs/argon2/blamka-round-ref.h | 60 + Blastproof/src/libs/argon2/core.c | 656 + Blastproof/src/libs/argon2/core.h | 237 + Blastproof/src/libs/argon2/encoding.c | 473 + Blastproof/src/libs/argon2/encoding.h | 62 + Blastproof/src/libs/argon2/opt.c | 285 + Blastproof/src/libs/argon2/thread.c | 57 + Blastproof/src/libs/argon2/thread.h | 67 + Blastproof/src/libs/include/conf.h | 67 + Blastproof/src/libs/include/console.h | 13 + Blastproof/src/libs/include/cpu.h | 18 + Blastproof/src/libs/include/crypto.h | 34 + Blastproof/src/libs/include/debug.h | 7 + Blastproof/src/libs/include/default.h | 20 + Blastproof/src/libs/include/disk.h | 18 + Blastproof/src/libs/include/font.h | 57 + Blastproof/src/libs/include/graphic.h | 18 + Blastproof/src/libs/include/initfs.h | 84 + Blastproof/src/libs/include/ui.h | 27 + Blastproof/src/libs/include/vyx.h | 74 + Blastproof/src/libs/sha3/sha3.c | 165 + Blastproof/src/libs/sha3/sha3.h | 34 + Blastproof/src/libs/sphincsplus/address.c | 107 + Blastproof/src/libs/sphincsplus/address.h | 55 + 
Blastproof/src/libs/sphincsplus/api.h | 80 + Blastproof/src/libs/sphincsplus/context.h | 32 + Blastproof/src/libs/sphincsplus/fors.c | 163 + Blastproof/src/libs/sphincsplus/fors.h | 36 + Blastproof/src/libs/sphincsplus/hash.h | 32 + Blastproof/src/libs/sphincsplus/hash_sha2.c | 198 + Blastproof/src/libs/sphincsplus/merkle.c | 64 + Blastproof/src/libs/sphincsplus/merkle.h | 22 + Blastproof/src/libs/sphincsplus/params.h | 3 + .../params/params-sphincs-sha2-256f.h | 85 + Blastproof/src/libs/sphincsplus/sha2.c | 702 + Blastproof/src/libs/sphincsplus/sha2.h | 46 + .../src/libs/sphincsplus/sha2_offsets.h | 20 + Blastproof/src/libs/sphincsplus/sign.c | 289 + Blastproof/src/libs/sphincsplus/thash.h | 17 + .../src/libs/sphincsplus/thash_sha2_robust.c | 77 + .../src/libs/sphincsplus/thash_sha2_simple.c | 62 + Blastproof/src/libs/sphincsplus/utils.c | 158 + Blastproof/src/libs/sphincsplus/utils.h | 69 + Blastproof/src/libs/sphincsplus/utilsx1.c | 104 + Blastproof/src/libs/sphincsplus/utilsx1.h | 30 + Blastproof/src/libs/sphincsplus/wots.c | 115 + Blastproof/src/libs/sphincsplus/wots.h | 29 + Blastproof/src/libs/sphincsplus/wotsx1.c | 76 + Blastproof/src/libs/sphincsplus/wotsx1.h | 40 + Blastproof/src/libs/src/conf.c | 432 + Blastproof/src/libs/src/console.c | 77 + Blastproof/src/libs/src/cpu.c | 29 + Blastproof/src/libs/src/crypto.c | 273 + Blastproof/src/libs/src/debug.c | 30 + Blastproof/src/libs/src/disk.c | 69 + Blastproof/src/libs/src/font.c | 234 + Blastproof/src/libs/src/graphic.c | 147 + Blastproof/src/libs/src/initfs.c | 357 + Blastproof/src/libs/src/ui.c | 311 + Blastproof/src/libs/src/vyx.c | 305 + Blastproof/src/main.c | 908 ++ Blastproof/src/main.inf | 49 + build.sh | 353 + docs/blastproof/bootconfig.md | 111 + docs/blastproof/bootprocess.md | 35 + docs/blastproof/bplib.md | 44 + docs/blastproof/fbm.md | 53 + docs/blastproof/fs.md | 91 + docs/blastproof/index.md | 21 + docs/blastproof/sbfie.md | 53 + docs/blastproof/security.md | 22 + docs/boottools/bootanim.md | 
66 + docs/boottools/fontgen.md | 52 + docs/boottools/index.md | 11 + docs/boottools/initfsgen.md | 50 + docs/boottools/keygen.md | 47 + docs/build.md | 54 + docs/index.md | 22 + docs/kerneltools/index.md | 10 + docs/kerneltools/payloads.md | 28 + docs/kerneltools/vmemcheck.md | 24 + docs/licenses.md | 40 + docs/roadmap.md | 13 + docs/shelter/bench.md | 149 + docs/shelter/bootconfig.md | 64 + docs/shelter/bootcontract.md | 66 + docs/shelter/bootprocess.md | 89 + docs/shelter/cpu/asmint.md | 13 + docs/shelter/cpu/cpuabstract.md | 11 + docs/shelter/cpu/serial.md | 12 + docs/shelter/cpu/tsc.md | 20 + docs/shelter/index.md | 35 + docs/shelter/kernel/config.md | 3 + docs/shelter/kernel/kernel.md | 11 + docs/shelter/kernel/log.md | 137 + docs/shelter/kernel/testutils.md | 23 + docs/shelter/keycard.md | 21 + docs/shelter/memmap.md | 37 + docs/shelter/memory/heap.md | 45 + docs/shelter/memory/index.md | 16 + docs/shelter/memory/page.md | 90 + docs/shelter/memory/pba.md | 19 + docs/shelter/memory/pez.md | 86 + docs/shelter/memory/radix.md | 37 + docs/shelter/memory/ring.md | 11 + docs/shelter/memory/slabs.md | 91 + docs/shelter/memory/vmemlayout.md | 9 + docs/shelter/naming.md | 21 + docs/shelter/ptp.md | 23 + docs/shelter/std/malloc.md | 7 + docs/shelter/std/mem.md | 6 + docs/shelter/std/status.md | 20 + docs/shelter/std/std.md | 14 + docs/shelter/std/types.md | 35 + docs/shelter/tab.md | 53 + docs/vyld/index.md | 5 + docs/vyld/vyld.md | 67 + docs/vyld/vyx.md | 32 + licenses/MIT.md | 25 + licenses/MPL-2.0.md | 355 + .../third-party/bsd-2-clause-patent_edk2.md | 51 + licenses/third-party/cc0-1.0_argon2.md | 121 + licenses/third-party/mit-0_sphincsplus.md | 21 + licenses/third-party/mit_elfio.md | 21 + licenses/third-party/mit_tiny_sha3.md | 21 + readme.md | 19 + shelter/lib/include/cpu/asm.h | 26 + shelter/lib/include/cpu/serial.h | 11 + shelter/lib/include/cpu/tsc.h | 18 + shelter/lib/include/kernel/conf.h | 25 + shelter/lib/include/kernel/log.h | 146 + 
shelter/lib/include/kernel/test.h | 7 + .../lib/include/kernel/tests/test_malloc.h | 9 + shelter/lib/include/kernel/tests/test_pez.h | 11 + shelter/lib/include/kernel/tests/test_radix.h | 14 + shelter/lib/include/kernel/tests/test_slabs.h | 19 + shelter/lib/include/kernel/tests/test_utils.h | 13 + shelter/lib/include/memory/heap.h | 34 + shelter/lib/include/memory/page.h | 159 + shelter/lib/include/memory/pba.h | 20 + shelter/lib/include/memory/pez/pez.h | 71 + shelter/lib/include/memory/pez/pez_debug.h | 103 + shelter/lib/include/memory/pez/radix.h | 31 + shelter/lib/include/memory/ring.h | 18 + shelter/lib/include/memory/slab.h | 6 + .../lib/include/memory/slabs/slab_generic.h | 60 + .../include/memory/slabs/slab_radix_node.h | 59 + .../lib/include/memory/slabs/slab_reg_phys.h | 58 + .../lib/include/memory/slabs/slab_reg_virt.h | 58 + shelter/lib/include/memory/vmem_layout.h | 93 + shelter/lib/include/std/malloc.h | 12 + shelter/lib/include/std/mem.h | 12 + shelter/lib/include/std/status.h | 43 + shelter/lib/include/std/stdlib.h | 7 + shelter/lib/include/std/type.h | 35 + shelter/lib/src/cpu/serial.c | 14 + shelter/lib/src/cpu/tsc.c | 13 + shelter/lib/src/kernel/conf.c | 19 + shelter/lib/src/kernel/log.c | 720 + shelter/lib/src/kernel/tests/test_malloc.c | 92 + shelter/lib/src/kernel/tests/test_pez.c | 77 + shelter/lib/src/kernel/tests/test_radix.c | 92 + shelter/lib/src/kernel/tests/test_slabs.c | 147 + shelter/lib/src/kernel/tests/test_utils.c | 59 + shelter/lib/src/memory/heap.c | 95 + shelter/lib/src/memory/page.c | 669 + shelter/lib/src/memory/pba.c | 71 + shelter/lib/src/memory/pez/pez.c | 860 ++ shelter/lib/src/memory/pez/pez_debug.c | 113 + shelter/lib/src/memory/pez/radix.c | 182 + shelter/lib/src/memory/ring.c | 27 + shelter/lib/src/memory/slabs/slab_generic.c | 205 + .../lib/src/memory/slabs/slab_radix_node.c | 227 + shelter/lib/src/memory/slabs/slab_reg_phys.c | 274 + shelter/lib/src/memory/slabs/slab_reg_virt.c | 271 + shelter/lib/src/std/malloc.c | 
39 + shelter/lib/src/std/mem.c | 35 + shelter/main.c | 260 + shelter/tools/checker/vmem_layout_check.py | 119 + shelter/tools/generator/malloc_payload_gen.py | 43 + .../generator/pez_alloc_free_payload_gen.py | 34 + .../tools/generator/radix_tree_payload_gen.py | 18 + vyld/_vyx_start.c | 6 + vyld/elfio.hpp | 1260 ++ vyld/elfio/elf_types.hpp | 1564 ++ vyld/elfio/elfio_array.hpp | 119 + vyld/elfio/elfio_dump.hpp | 1388 ++ vyld/elfio/elfio_dynamic.hpp | 300 + vyld/elfio/elfio_header.hpp | 192 + vyld/elfio/elfio_modinfo.hpp | 168 + vyld/elfio/elfio_note.hpp | 208 + vyld/elfio/elfio_relocation.hpp | 596 + vyld/elfio/elfio_section.hpp | 611 + vyld/elfio/elfio_segment.hpp | 405 + vyld/elfio/elfio_strings.hpp | 143 + vyld/elfio/elfio_symbols.hpp | 716 + vyld/elfio/elfio_utils.hpp | 373 + vyld/elfio/elfio_version.hpp | 4 + vyld/elfio/elfio_versym.hpp | 310 + vyld/vyld.cpp | 327 + 462 files changed, 134655 insertions(+) create mode 100644 Blastproof/blastproof.dsc create mode 100644 Blastproof/bootanim/bootanim.cpp create mode 100644 Blastproof/bootanim/logo.png create mode 100644 Blastproof/bootanim/stb_image.h create mode 100644 Blastproof/config/bp_template.conf create mode 100644 Blastproof/fontgen/chars/0x21.png create mode 100644 Blastproof/fontgen/chars/0x22.png create mode 100644 Blastproof/fontgen/chars/0x23.png create mode 100644 Blastproof/fontgen/chars/0x24.png create mode 100644 Blastproof/fontgen/chars/0x25.png create mode 100644 Blastproof/fontgen/chars/0x26.png create mode 100644 Blastproof/fontgen/chars/0x27.png create mode 100644 Blastproof/fontgen/chars/0x28.png create mode 100644 Blastproof/fontgen/chars/0x29.png create mode 100644 Blastproof/fontgen/chars/0x2A.png create mode 100644 Blastproof/fontgen/chars/0x2B.png create mode 100644 Blastproof/fontgen/chars/0x2C.png create mode 100644 Blastproof/fontgen/chars/0x2D.png create mode 100644 Blastproof/fontgen/chars/0x2E.png create mode 100644 Blastproof/fontgen/chars/0x2F.png create mode 100644 
Blastproof/fontgen/chars/0x30.png create mode 100644 Blastproof/fontgen/chars/0x31.png create mode 100644 Blastproof/fontgen/chars/0x32.png create mode 100644 Blastproof/fontgen/chars/0x33.png create mode 100644 Blastproof/fontgen/chars/0x34.png create mode 100644 Blastproof/fontgen/chars/0x35.png create mode 100644 Blastproof/fontgen/chars/0x36.png create mode 100644 Blastproof/fontgen/chars/0x37.png create mode 100644 Blastproof/fontgen/chars/0x38.png create mode 100644 Blastproof/fontgen/chars/0x39.png create mode 100644 Blastproof/fontgen/chars/0x3A.png create mode 100644 Blastproof/fontgen/chars/0x3B.png create mode 100644 Blastproof/fontgen/chars/0x3C.png create mode 100644 Blastproof/fontgen/chars/0x3D.png create mode 100644 Blastproof/fontgen/chars/0x3E.png create mode 100644 Blastproof/fontgen/chars/0x3F.png create mode 100644 Blastproof/fontgen/chars/0x40.png create mode 100644 Blastproof/fontgen/chars/0x41.png create mode 100644 Blastproof/fontgen/chars/0x42.png create mode 100644 Blastproof/fontgen/chars/0x43.png create mode 100644 Blastproof/fontgen/chars/0x44.png create mode 100644 Blastproof/fontgen/chars/0x45.png create mode 100644 Blastproof/fontgen/chars/0x46.png create mode 100644 Blastproof/fontgen/chars/0x47.png create mode 100644 Blastproof/fontgen/chars/0x48.png create mode 100644 Blastproof/fontgen/chars/0x49.png create mode 100644 Blastproof/fontgen/chars/0x4A.png create mode 100644 Blastproof/fontgen/chars/0x4B.png create mode 100644 Blastproof/fontgen/chars/0x4C.png create mode 100644 Blastproof/fontgen/chars/0x4D.png create mode 100644 Blastproof/fontgen/chars/0x4E.png create mode 100644 Blastproof/fontgen/chars/0x4F.png create mode 100644 Blastproof/fontgen/chars/0x50.png create mode 100644 Blastproof/fontgen/chars/0x51.png create mode 100644 Blastproof/fontgen/chars/0x52.png create mode 100644 Blastproof/fontgen/chars/0x53.png create mode 100644 Blastproof/fontgen/chars/0x54.png create mode 100644 Blastproof/fontgen/chars/0x55.png 
create mode 100644 Blastproof/fontgen/chars/0x56.png create mode 100644 Blastproof/fontgen/chars/0x57.png create mode 100644 Blastproof/fontgen/chars/0x58.png create mode 100644 Blastproof/fontgen/chars/0x59.png create mode 100644 Blastproof/fontgen/chars/0x5A.png create mode 100644 Blastproof/fontgen/chars/0x5B.png create mode 100644 Blastproof/fontgen/chars/0x5C.png create mode 100644 Blastproof/fontgen/chars/0x5D.png create mode 100644 Blastproof/fontgen/chars/0x5E.png create mode 100644 Blastproof/fontgen/chars/0x5F.png create mode 100644 Blastproof/fontgen/chars/0x60.png create mode 100644 Blastproof/fontgen/chars/0x61.png create mode 100644 Blastproof/fontgen/chars/0x62.png create mode 100644 Blastproof/fontgen/chars/0x63.png create mode 100644 Blastproof/fontgen/chars/0x64.png create mode 100644 Blastproof/fontgen/chars/0x65.png create mode 100644 Blastproof/fontgen/chars/0x66.png create mode 100644 Blastproof/fontgen/chars/0x67.png create mode 100644 Blastproof/fontgen/chars/0x68.png create mode 100644 Blastproof/fontgen/chars/0x69.png create mode 100644 Blastproof/fontgen/chars/0x6A.png create mode 100644 Blastproof/fontgen/chars/0x6B.png create mode 100644 Blastproof/fontgen/chars/0x6C.png create mode 100644 Blastproof/fontgen/chars/0x6D.png create mode 100644 Blastproof/fontgen/chars/0x6E.png create mode 100644 Blastproof/fontgen/chars/0x6F.png create mode 100644 Blastproof/fontgen/chars/0x70.png create mode 100644 Blastproof/fontgen/chars/0x71.png create mode 100644 Blastproof/fontgen/chars/0x72.png create mode 100644 Blastproof/fontgen/chars/0x73.png create mode 100644 Blastproof/fontgen/chars/0x74.png create mode 100644 Blastproof/fontgen/chars/0x75.png create mode 100644 Blastproof/fontgen/chars/0x76.png create mode 100644 Blastproof/fontgen/chars/0x77.png create mode 100644 Blastproof/fontgen/chars/0x78.png create mode 100644 Blastproof/fontgen/chars/0x79.png create mode 100644 Blastproof/fontgen/chars/0x7A.png create mode 100644 
Blastproof/fontgen/chars/0x7B.png create mode 100644 Blastproof/fontgen/chars/0x7C.png create mode 100644 Blastproof/fontgen/chars/0x7D.png create mode 100644 Blastproof/fontgen/chars/0x7E.png create mode 100644 Blastproof/fontgen/chars/0xFF.png create mode 100644 Blastproof/fontgen/fontgen.cpp create mode 100644 Blastproof/fontgen/stb_image.h create mode 100644 Blastproof/initfsgen/address.c create mode 100644 Blastproof/initfsgen/address.h create mode 100644 Blastproof/initfsgen/api.h create mode 100755 Blastproof/initfsgen/build.sh create mode 100644 Blastproof/initfsgen/context.h create mode 100644 Blastproof/initfsgen/fors.c create mode 100644 Blastproof/initfsgen/fors.h create mode 100644 Blastproof/initfsgen/hash.h create mode 100644 Blastproof/initfsgen/hash_sha2.c create mode 100644 Blastproof/initfsgen/initfsgen.cpp create mode 100644 Blastproof/initfsgen/merkle.c create mode 100644 Blastproof/initfsgen/merkle.h create mode 100644 Blastproof/initfsgen/params.h create mode 100644 Blastproof/initfsgen/params/params-sphincs-sha2-256f.h create mode 100644 Blastproof/initfsgen/randombytes.c create mode 100644 Blastproof/initfsgen/randombytes.h create mode 100644 Blastproof/initfsgen/sha2.c create mode 100644 Blastproof/initfsgen/sha2.h create mode 100644 Blastproof/initfsgen/sha2_offsets.h create mode 100644 Blastproof/initfsgen/sha3.c create mode 100644 Blastproof/initfsgen/sha3.h create mode 100644 Blastproof/initfsgen/sign.c create mode 100644 Blastproof/initfsgen/thash.h create mode 100644 Blastproof/initfsgen/thash_sha2_robust.c create mode 100644 Blastproof/initfsgen/thash_sha2_simple.c create mode 100644 Blastproof/initfsgen/utils.c create mode 100644 Blastproof/initfsgen/utils.h create mode 100644 Blastproof/initfsgen/utilsx1.c create mode 100644 Blastproof/initfsgen/utilsx1.h create mode 100644 Blastproof/initfsgen/wots.c create mode 100644 Blastproof/initfsgen/wots.h create mode 100644 Blastproof/initfsgen/wotsx1.c create mode 100644 
Blastproof/initfsgen/wotsx1.h create mode 100644 Blastproof/keygen/address.c create mode 100644 Blastproof/keygen/address.h create mode 100644 Blastproof/keygen/address.o create mode 100644 Blastproof/keygen/api.h create mode 100644 Blastproof/keygen/argon2.h create mode 100644 Blastproof/keygen/argon2/Argon2.sln create mode 100644 Blastproof/keygen/argon2/CHANGELOG.md create mode 100644 Blastproof/keygen/argon2/LICENSE create mode 100644 Blastproof/keygen/argon2/Makefile create mode 100644 Blastproof/keygen/argon2/Package.swift create mode 100644 Blastproof/keygen/argon2/README.md create mode 100755 Blastproof/keygen/argon2/argon2 create mode 100644 Blastproof/keygen/argon2/argon2-specs.pdf create mode 100644 Blastproof/keygen/argon2/kats/argon2d create mode 100644 Blastproof/keygen/argon2/kats/argon2d.shasum create mode 100644 Blastproof/keygen/argon2/kats/argon2d_v16 create mode 100644 Blastproof/keygen/argon2/kats/argon2d_v16.shasum create mode 100644 Blastproof/keygen/argon2/kats/argon2i create mode 100644 Blastproof/keygen/argon2/kats/argon2i.shasum create mode 100644 Blastproof/keygen/argon2/kats/argon2i_v16 create mode 100644 Blastproof/keygen/argon2/kats/argon2i_v16.shasum create mode 100644 Blastproof/keygen/argon2/kats/argon2id create mode 100644 Blastproof/keygen/argon2/kats/argon2id.shasum create mode 100644 Blastproof/keygen/argon2/kats/argon2id_v16 create mode 100644 Blastproof/keygen/argon2/kats/argon2id_v16.shasum create mode 100644 Blastproof/keygen/argon2/kats/check-sums.ps1 create mode 100755 Blastproof/keygen/argon2/kats/check-sums.sh create mode 100644 Blastproof/keygen/argon2/kats/test.ps1 create mode 100755 Blastproof/keygen/argon2/kats/test.sh create mode 100644 Blastproof/keygen/argon2/libargon2.pc create mode 100644 Blastproof/keygen/argon2/libargon2.pc.in create mode 100755 Blastproof/keygen/argon2/libargon2.so.1 create mode 100644 Blastproof/keygen/argon2/man/argon2.1 create mode 100644 Blastproof/keygen/argon2/src/argon2.c create mode 
100644 Blastproof/keygen/argon2/src/argon2.o create mode 100644 Blastproof/keygen/argon2/src/bench.c create mode 100644 Blastproof/keygen/argon2/src/blake2/blake2-impl.h create mode 100644 Blastproof/keygen/argon2/src/blake2/blake2.h create mode 100644 Blastproof/keygen/argon2/src/blake2/blake2b.c create mode 100644 Blastproof/keygen/argon2/src/blake2/blake2b.o create mode 100644 Blastproof/keygen/argon2/src/blake2/blamka-round-opt.h create mode 100644 Blastproof/keygen/argon2/src/blake2/blamka-round-ref.h create mode 100644 Blastproof/keygen/argon2/src/core.c create mode 100644 Blastproof/keygen/argon2/src/core.h create mode 100644 Blastproof/keygen/argon2/src/core.o create mode 100644 Blastproof/keygen/argon2/src/encoding.c create mode 100644 Blastproof/keygen/argon2/src/encoding.h create mode 100644 Blastproof/keygen/argon2/src/encoding.o create mode 100644 Blastproof/keygen/argon2/src/genkat.c create mode 100644 Blastproof/keygen/argon2/src/genkat.h create mode 100644 Blastproof/keygen/argon2/src/opt.c create mode 100644 Blastproof/keygen/argon2/src/opt.o create mode 100644 Blastproof/keygen/argon2/src/ref.c create mode 100644 Blastproof/keygen/argon2/src/run.c create mode 100644 Blastproof/keygen/argon2/src/test.c create mode 100644 Blastproof/keygen/argon2/src/thread.c create mode 100644 Blastproof/keygen/argon2/src/thread.h create mode 100644 Blastproof/keygen/argon2/src/thread.o create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2Opt/Argon2Opt.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2Opt/Argon2Opt.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptBench/Argon2OptBench.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptBench/Argon2OptBench.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptDll/Argon2OptDll.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptDll/Argon2OptDll.vcxproj.filters create mode 100644 
Blastproof/keygen/argon2/vs2015/Argon2OptGenKAT/Argon2OptGenKAT.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptGenKAT/Argon2OptGenKAT.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptTestCI/Argon2OptTestCI.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2OptTestCI/Argon2OptTestCI.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2Ref/Argon2Ref.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2Ref/Argon2Ref.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefBench/Argon2RefBench.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefBench/Argon2RefBench.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefDll/Argon2RefDll.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefDll/Argon2RefDll.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefGenKAT/Argon2RefGenKAT.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefGenKAT/Argon2RefGenKAT.vcxproj.filters create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefTestCI/Argon2RefTestCI.vcxproj create mode 100644 Blastproof/keygen/argon2/vs2015/Argon2RefTestCI/Argon2RefTestCI.vcxproj.filters create mode 100755 Blastproof/keygen/build.sh create mode 100644 Blastproof/keygen/context.h create mode 100644 Blastproof/keygen/fors.c create mode 100644 Blastproof/keygen/fors.h create mode 100644 Blastproof/keygen/fors.o create mode 100644 Blastproof/keygen/hash.h create mode 100644 Blastproof/keygen/hash_sha2.c create mode 100644 Blastproof/keygen/hash_sha2.o create mode 100644 Blastproof/keygen/keygen.cpp create mode 100644 Blastproof/keygen/merkle.c create mode 100644 Blastproof/keygen/merkle.h create mode 100644 Blastproof/keygen/merkle.o create mode 100644 Blastproof/keygen/params.h create mode 100644 Blastproof/keygen/params/params-sphincs-sha2-256f.h create mode 100644 Blastproof/keygen/randombytes.c create 
mode 100644 Blastproof/keygen/randombytes.h create mode 100644 Blastproof/keygen/randombytes.o create mode 100644 Blastproof/keygen/sha2.c create mode 100644 Blastproof/keygen/sha2.h create mode 100644 Blastproof/keygen/sha2.o create mode 100644 Blastproof/keygen/sha2_offsets.h create mode 100644 Blastproof/keygen/sha3.c create mode 100644 Blastproof/keygen/sha3.h create mode 100644 Blastproof/keygen/sha3.o create mode 100644 Blastproof/keygen/sign.c create mode 100644 Blastproof/keygen/sign.o create mode 100644 Blastproof/keygen/thash.h create mode 100644 Blastproof/keygen/thash_sha2_robust.c create mode 100644 Blastproof/keygen/thash_sha2_robust.o create mode 100644 Blastproof/keygen/thash_sha2_simple.c create mode 100644 Blastproof/keygen/utils.c create mode 100644 Blastproof/keygen/utils.h create mode 100644 Blastproof/keygen/utils.o create mode 100644 Blastproof/keygen/utilsx1.c create mode 100644 Blastproof/keygen/utilsx1.h create mode 100644 Blastproof/keygen/utilsx1.o create mode 100644 Blastproof/keygen/wots.c create mode 100644 Blastproof/keygen/wots.h create mode 100644 Blastproof/keygen/wots.o create mode 100644 Blastproof/keygen/wotsx1.c create mode 100644 Blastproof/keygen/wotsx1.h create mode 100644 Blastproof/keygen/wotsx1.o create mode 100644 Blastproof/src/keycard.asm create mode 100644 Blastproof/src/libs/argon2/argon2.c create mode 100644 Blastproof/src/libs/argon2/argon2.h create mode 100644 Blastproof/src/libs/argon2/blake2-impl.h create mode 100644 Blastproof/src/libs/argon2/blake2.h create mode 100644 Blastproof/src/libs/argon2/blake2b.c create mode 100644 Blastproof/src/libs/argon2/blamka-round-opt.h create mode 100644 Blastproof/src/libs/argon2/blamka-round-ref.h create mode 100644 Blastproof/src/libs/argon2/core.c create mode 100644 Blastproof/src/libs/argon2/core.h create mode 100644 Blastproof/src/libs/argon2/encoding.c create mode 100644 Blastproof/src/libs/argon2/encoding.h create mode 100644 Blastproof/src/libs/argon2/opt.c create 
mode 100644 Blastproof/src/libs/argon2/thread.c create mode 100644 Blastproof/src/libs/argon2/thread.h create mode 100644 Blastproof/src/libs/include/conf.h create mode 100644 Blastproof/src/libs/include/console.h create mode 100644 Blastproof/src/libs/include/cpu.h create mode 100644 Blastproof/src/libs/include/crypto.h create mode 100644 Blastproof/src/libs/include/debug.h create mode 100644 Blastproof/src/libs/include/default.h create mode 100644 Blastproof/src/libs/include/disk.h create mode 100644 Blastproof/src/libs/include/font.h create mode 100644 Blastproof/src/libs/include/graphic.h create mode 100644 Blastproof/src/libs/include/initfs.h create mode 100644 Blastproof/src/libs/include/ui.h create mode 100644 Blastproof/src/libs/include/vyx.h create mode 100644 Blastproof/src/libs/sha3/sha3.c create mode 100644 Blastproof/src/libs/sha3/sha3.h create mode 100644 Blastproof/src/libs/sphincsplus/address.c create mode 100644 Blastproof/src/libs/sphincsplus/address.h create mode 100644 Blastproof/src/libs/sphincsplus/api.h create mode 100644 Blastproof/src/libs/sphincsplus/context.h create mode 100644 Blastproof/src/libs/sphincsplus/fors.c create mode 100644 Blastproof/src/libs/sphincsplus/fors.h create mode 100644 Blastproof/src/libs/sphincsplus/hash.h create mode 100644 Blastproof/src/libs/sphincsplus/hash_sha2.c create mode 100644 Blastproof/src/libs/sphincsplus/merkle.c create mode 100644 Blastproof/src/libs/sphincsplus/merkle.h create mode 100644 Blastproof/src/libs/sphincsplus/params.h create mode 100644 Blastproof/src/libs/sphincsplus/params/params-sphincs-sha2-256f.h create mode 100644 Blastproof/src/libs/sphincsplus/sha2.c create mode 100644 Blastproof/src/libs/sphincsplus/sha2.h create mode 100644 Blastproof/src/libs/sphincsplus/sha2_offsets.h create mode 100644 Blastproof/src/libs/sphincsplus/sign.c create mode 100644 Blastproof/src/libs/sphincsplus/thash.h create mode 100644 Blastproof/src/libs/sphincsplus/thash_sha2_robust.c create mode 100644 
Blastproof/src/libs/sphincsplus/thash_sha2_simple.c create mode 100644 Blastproof/src/libs/sphincsplus/utils.c create mode 100644 Blastproof/src/libs/sphincsplus/utils.h create mode 100644 Blastproof/src/libs/sphincsplus/utilsx1.c create mode 100644 Blastproof/src/libs/sphincsplus/utilsx1.h create mode 100644 Blastproof/src/libs/sphincsplus/wots.c create mode 100644 Blastproof/src/libs/sphincsplus/wots.h create mode 100644 Blastproof/src/libs/sphincsplus/wotsx1.c create mode 100644 Blastproof/src/libs/sphincsplus/wotsx1.h create mode 100644 Blastproof/src/libs/src/conf.c create mode 100644 Blastproof/src/libs/src/console.c create mode 100644 Blastproof/src/libs/src/cpu.c create mode 100644 Blastproof/src/libs/src/crypto.c create mode 100644 Blastproof/src/libs/src/debug.c create mode 100644 Blastproof/src/libs/src/disk.c create mode 100644 Blastproof/src/libs/src/font.c create mode 100644 Blastproof/src/libs/src/graphic.c create mode 100644 Blastproof/src/libs/src/initfs.c create mode 100644 Blastproof/src/libs/src/ui.c create mode 100644 Blastproof/src/libs/src/vyx.c create mode 100644 Blastproof/src/main.c create mode 100644 Blastproof/src/main.inf create mode 100755 build.sh create mode 100644 docs/blastproof/bootconfig.md create mode 100644 docs/blastproof/bootprocess.md create mode 100644 docs/blastproof/bplib.md create mode 100644 docs/blastproof/fbm.md create mode 100644 docs/blastproof/fs.md create mode 100644 docs/blastproof/index.md create mode 100644 docs/blastproof/sbfie.md create mode 100644 docs/blastproof/security.md create mode 100644 docs/boottools/bootanim.md create mode 100644 docs/boottools/fontgen.md create mode 100644 docs/boottools/index.md create mode 100644 docs/boottools/initfsgen.md create mode 100644 docs/boottools/keygen.md create mode 100644 docs/build.md create mode 100644 docs/index.md create mode 100644 docs/kerneltools/index.md create mode 100644 docs/kerneltools/payloads.md create mode 100644 docs/kerneltools/vmemcheck.md create 
mode 100644 docs/licenses.md create mode 100644 docs/roadmap.md create mode 100644 docs/shelter/bench.md create mode 100644 docs/shelter/bootconfig.md create mode 100644 docs/shelter/bootcontract.md create mode 100644 docs/shelter/bootprocess.md create mode 100644 docs/shelter/cpu/asmint.md create mode 100644 docs/shelter/cpu/cpuabstract.md create mode 100644 docs/shelter/cpu/serial.md create mode 100644 docs/shelter/cpu/tsc.md create mode 100644 docs/shelter/index.md create mode 100644 docs/shelter/kernel/config.md create mode 100644 docs/shelter/kernel/kernel.md create mode 100644 docs/shelter/kernel/log.md create mode 100644 docs/shelter/kernel/testutils.md create mode 100644 docs/shelter/keycard.md create mode 100644 docs/shelter/memmap.md create mode 100644 docs/shelter/memory/heap.md create mode 100644 docs/shelter/memory/index.md create mode 100644 docs/shelter/memory/page.md create mode 100644 docs/shelter/memory/pba.md create mode 100644 docs/shelter/memory/pez.md create mode 100644 docs/shelter/memory/radix.md create mode 100644 docs/shelter/memory/ring.md create mode 100644 docs/shelter/memory/slabs.md create mode 100644 docs/shelter/memory/vmemlayout.md create mode 100644 docs/shelter/naming.md create mode 100644 docs/shelter/ptp.md create mode 100644 docs/shelter/std/malloc.md create mode 100644 docs/shelter/std/mem.md create mode 100644 docs/shelter/std/status.md create mode 100644 docs/shelter/std/std.md create mode 100644 docs/shelter/std/types.md create mode 100644 docs/shelter/tab.md create mode 100644 docs/vyld/index.md create mode 100644 docs/vyld/vyld.md create mode 100644 docs/vyld/vyx.md create mode 100644 licenses/MIT.md create mode 100644 licenses/MPL-2.0.md create mode 100644 licenses/third-party/bsd-2-clause-patent_edk2.md create mode 100644 licenses/third-party/cc0-1.0_argon2.md create mode 100644 licenses/third-party/mit-0_sphincsplus.md create mode 100644 licenses/third-party/mit_elfio.md create mode 100644 
licenses/third-party/mit_tiny_sha3.md create mode 100644 readme.md create mode 100644 shelter/lib/include/cpu/asm.h create mode 100644 shelter/lib/include/cpu/serial.h create mode 100644 shelter/lib/include/cpu/tsc.h create mode 100644 shelter/lib/include/kernel/conf.h create mode 100644 shelter/lib/include/kernel/log.h create mode 100644 shelter/lib/include/kernel/test.h create mode 100644 shelter/lib/include/kernel/tests/test_malloc.h create mode 100644 shelter/lib/include/kernel/tests/test_pez.h create mode 100644 shelter/lib/include/kernel/tests/test_radix.h create mode 100644 shelter/lib/include/kernel/tests/test_slabs.h create mode 100644 shelter/lib/include/kernel/tests/test_utils.h create mode 100644 shelter/lib/include/memory/heap.h create mode 100644 shelter/lib/include/memory/page.h create mode 100644 shelter/lib/include/memory/pba.h create mode 100644 shelter/lib/include/memory/pez/pez.h create mode 100644 shelter/lib/include/memory/pez/pez_debug.h create mode 100644 shelter/lib/include/memory/pez/radix.h create mode 100644 shelter/lib/include/memory/ring.h create mode 100644 shelter/lib/include/memory/slab.h create mode 100644 shelter/lib/include/memory/slabs/slab_generic.h create mode 100644 shelter/lib/include/memory/slabs/slab_radix_node.h create mode 100644 shelter/lib/include/memory/slabs/slab_reg_phys.h create mode 100644 shelter/lib/include/memory/slabs/slab_reg_virt.h create mode 100644 shelter/lib/include/memory/vmem_layout.h create mode 100644 shelter/lib/include/std/malloc.h create mode 100644 shelter/lib/include/std/mem.h create mode 100644 shelter/lib/include/std/status.h create mode 100644 shelter/lib/include/std/stdlib.h create mode 100644 shelter/lib/include/std/type.h create mode 100644 shelter/lib/src/cpu/serial.c create mode 100644 shelter/lib/src/cpu/tsc.c create mode 100644 shelter/lib/src/kernel/conf.c create mode 100644 shelter/lib/src/kernel/log.c create mode 100644 shelter/lib/src/kernel/tests/test_malloc.c create mode 100644 
shelter/lib/src/kernel/tests/test_pez.c create mode 100644 shelter/lib/src/kernel/tests/test_radix.c create mode 100644 shelter/lib/src/kernel/tests/test_slabs.c create mode 100644 shelter/lib/src/kernel/tests/test_utils.c create mode 100644 shelter/lib/src/memory/heap.c create mode 100644 shelter/lib/src/memory/page.c create mode 100644 shelter/lib/src/memory/pba.c create mode 100644 shelter/lib/src/memory/pez/pez.c create mode 100644 shelter/lib/src/memory/pez/pez_debug.c create mode 100644 shelter/lib/src/memory/pez/radix.c create mode 100644 shelter/lib/src/memory/ring.c create mode 100644 shelter/lib/src/memory/slabs/slab_generic.c create mode 100644 shelter/lib/src/memory/slabs/slab_radix_node.c create mode 100644 shelter/lib/src/memory/slabs/slab_reg_phys.c create mode 100644 shelter/lib/src/memory/slabs/slab_reg_virt.c create mode 100644 shelter/lib/src/std/malloc.c create mode 100644 shelter/lib/src/std/mem.c create mode 100644 shelter/main.c create mode 100644 shelter/tools/checker/vmem_layout_check.py create mode 100644 shelter/tools/generator/malloc_payload_gen.py create mode 100644 shelter/tools/generator/pez_alloc_free_payload_gen.py create mode 100644 shelter/tools/generator/radix_tree_payload_gen.py create mode 100644 vyld/_vyx_start.c create mode 100644 vyld/elfio.hpp create mode 100644 vyld/elfio/elf_types.hpp create mode 100644 vyld/elfio/elfio_array.hpp create mode 100644 vyld/elfio/elfio_dump.hpp create mode 100644 vyld/elfio/elfio_dynamic.hpp create mode 100644 vyld/elfio/elfio_header.hpp create mode 100644 vyld/elfio/elfio_modinfo.hpp create mode 100644 vyld/elfio/elfio_note.hpp create mode 100644 vyld/elfio/elfio_relocation.hpp create mode 100644 vyld/elfio/elfio_section.hpp create mode 100644 vyld/elfio/elfio_segment.hpp create mode 100644 vyld/elfio/elfio_strings.hpp create mode 100644 vyld/elfio/elfio_symbols.hpp create mode 100644 vyld/elfio/elfio_utils.hpp create mode 100644 vyld/elfio/elfio_version.hpp create mode 100644 
vyld/elfio/elfio_versym.hpp create mode 100644 vyld/vyld.cpp diff --git a/Blastproof/blastproof.dsc b/Blastproof/blastproof.dsc new file mode 100644 index 0000000..a64ade0 --- /dev/null +++ b/Blastproof/blastproof.dsc @@ -0,0 +1,31 @@ +[Defines] + DSC_SPECIFICATION = 0x0001001C + PLATFORM_GUID = 040262ee-aebc-11f0-ba2c-10ffe08423a6 + PLATFORM_VERSION = 0.1 + PLATFORM_NAME = BlastproofEnv + SKUID_IDENTIFIER = DEFAULT + SUPPORTED_ARCHITECTURES = X64 + BUILD_TARGETS = RELEASE + +[Components] + Blastproof/src/main.inf + +[LibraryClasses] + UefiApplicationEntryPoint|MdePkg/Library/UefiApplicationEntryPoint/UefiApplicationEntryPoint.inf + UefiBootServicesTableLib|MdePkg/Library/UefiBootServicesTableLib/UefiBootServicesTableLib.inf + DebugLib|MdePkg/Library/UefiDebugLibConOut/UefiDebugLibConOut.inf + BaseLib|MdePkg/Library/BaseLib/BaseLib.inf + PcdLib|MdePkg/Library/BasePcdLibNull/BasePcdLibNull.inf + BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf + RegisterFilterLib|MdePkg/Library/RegisterFilterLibNull/RegisterFilterLibNull.inf + PrintLib|MdePkg/Library/BasePrintLib/BasePrintLib.inf + DebugPrintErrorLevelLib|MdePkg/Library/BaseDebugPrintErrorLevelLib/BaseDebugPrintErrorLevelLib.inf + StackCheckLib|MdePkg/Library/StackCheckLib/StackCheckLib.inf + StackCheckFailureHookLib|MdePkg/Library/StackCheckFailureHookLibNull/StackCheckFailureHookLibNull.inf + UefiLib|MdePkg/Library/UefiLib/UefiLib.inf + MemoryAllocationLib|MdePkg/Library/UefiMemoryAllocationLib/UefiMemoryAllocationLib.inf + DevicePathLib|MdePkg/Library/UefiDevicePathLib/UefiDevicePathLib.inf + UefiRuntimeServicesTableLib|MdePkg/Library/UefiRuntimeServicesTableLib/UefiRuntimeServicesTableLib.inf + +[Packages] + MdePkg/MdePkg.dec diff --git a/Blastproof/bootanim/bootanim.cpp b/Blastproof/bootanim/bootanim.cpp new file mode 100644 index 0000000..64f0751 --- /dev/null +++ b/Blastproof/bootanim/bootanim.cpp @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: MPL-2.0 +#define STB_IMAGE_IMPLEMENTATION +#include 
"stb_image.h" +#include +#include +#include +#include +#include +#include +#include +#include +using namespace std; +namespace fs=filesystem; +struct point { + long double x; + long double y; +}; +struct pixel { + uint16_t x; + uint16_t y; +}; +const point AP0={0,0}; +const point AP1={0.35,0.05}; +const point AP2={0.65,1.0}; +const point AP3={1,1}; +const point BP0={0,0}; +const point BP1={0.1,1.5}; +const point BP2={0.3,1.0}; +const point BP3={1,1}; +long double bezier_1d(long double p0,long double p1,long double p2,long double p3,long double t) { + long double u=1.0-t; + return (u*u*u)*p0+3*(u*u)*t*p1+3*u*(t*t)*p2+(t*t*t)*p3; +} +long double find_t(long double p0,long double p1,long double p2,long double p3,long double x_search) { + long double low=0.0; + long double high=1.0; + long double mid; + for (int i=0;i<20;i++) { + mid=(low+high)/2.0; + long double x_mid=bezier_1d(p0,p1,p2,p3,mid); + if (x_mid apply_bezier(point start,point p0,point p1,point p2,point p3,point end,int frames) { + point vect; + vect.x=end.x-start.x; + vect.y=end.y-start.y; + vector out; + out.reserve(frames); + for (int frame=0;frame& white_pixels) { + vector img(w*h*3,0); + for (auto& p:white_pixels) { + if (p.xmax) return max; + return (uint16_t)v; +} +int main (int argc,char **argv) { + if (argc!=5) { + cout<<"[Bootanim] Error: invalid argument."< logopixels(true_w*true_h,0); + int offset_x=(true_w-w_image)/2; + int offset_y=(true_h-h_image)/2; + for (int y=0;y=0 && dst_x=0 && dst_y endco; + for (int i=0;i127) { + point p; + p.x=i%true_w; + p.y=i/true_w; + endco.push_back(p); + } + } + random_device rd; + mt19937 gen(rd()); + vector randco; + randco.reserve(endco.size()); + uniform_real_distribution dist_angle(0.0L,2.0L*M_PI); + uniform_real_distribution dist_r1(0.0L,min((long double)true_w,(long double)true_h)/2); + uniform_real_distribution dist_r2(min((long double)true_w,(long double)true_h)/2,(max((long double)true_w,(long double)true_h)+300)/2); + uniform_int_distribution 
dist_c(0,1); + long double cx=(long double)true_w/2.0; + long double cy=(long double)true_h/2.0; + for (int i=0;i> codata(301); + for (int i=0;i> tempdata1; + vector> tempdata2; + tempdata1.reserve(endco.size()); + tempdata2.reserve(endco.size()); + for (int i=0;i> pixeldata(81); + for (int i=0;i<80;i++) { + for (int y=0;y data(pixeldata.size()*pixeldata[0].size()*sizeof(pixel)); + size_t offset=0; + for (size_t i=0;i=width-1 || px.x==0) { + px.x=0xFFFF; + } + if (px.y>=height-1 || px.y==0) { + px.y=0xFFFF; + } + memcpy(&data[offset],&px,sizeof(pixel)); + offset+=sizeof(pixel); + } + } + ofstream file("bootanim.bin",ios::binary); + unsigned char magic[8]={'B','o','o','t','A','n','i','m'}; + file.write((char*)magic,8); + vector header(4); + header[0]=width; + header[1]=height; + header[2]=pixeldata.size(); + header[3]=pixeldata[0].size(); + vector headerdata(4*8); + memcpy(headerdata.data(),header.data(),headerdata.size()); + file.write((char *)headerdata.data(),headerdata.size()); + file.write((char *)data.data(),data.size()); + file.close(); + cout<<"[Bootanim] Successfully build boot animation as bootanim.bin."<^}=$T`cLPeV23%t{BRw1$L; zC~~Y}!-s{ip*cjTzK?#t-@m`VzkfWh=f1D|dhYAGulM`9?)$lu&YiK^E4E)uKtNzG z9(R%;ARtKDc|?Q(NlddrH}Db-z&Qj72<(yCc?1O>%(u7R=q>zWO%_=)T_d0#u#~dA-X7Q6ri}#;CXn z861W>lG??fhpX)cd2c8uX#BhVZe7GD4V#U2wNIm)gEqUtL05W;7Mp%n-Si(b8Ps<< zEl^$hJ6E761_VM&3V~x(#6$!vq@e=Ins5PWTeN^?NsPd`lY)XpiT_Oszs)PAR(j0~ zn|^+0cir+rHUIv#HO(Dzy_Mh7(}Q%qwaxDUJ4CMbTMfQVkSN<)UpiYDx%~dd(s*N+ z+)7IC@BaS&8`06Iis{R9L(dF*gt*2iR8#1M$J)#dc6eChlFU*|;jLdkIMZEJxpKeu z-rvi=Rr%_dqxii)KWjIBZaVh*H7EFj(PNEYKdSh(O)I_44xgU=HkVp6jrh*fWwvH< z9v@17()??ao3yz&TCaDMco(#MsP$4){bP-SY}3$h6U(md*Z8Nue6G6GYmk-rwdm&3 zc>X=cDLJNu`Svkes)UhsQo@&Ur~15tf-`>hYNV5!8w<7n(kNhr zhrBB=l$V9MUUy0b^^MmRp;eZho&7l)UYUi)ynfH2Cg4F4a);(Zkdk@9bIdDM9CF6&u!v0Z2 zFsS}AV3dHnwQaNFt0rk+i{{8xxi%WNRb6OM-gHm0NAX>TKFDBf6x@YOu*=mqo+_m$ ztoS~0c$fr+@gVQ9VsluUM7t`L2jFu&8-1HGZdy{*aS6myi<%mAd={xEiWrv9vQp4` 
z_b`Hv-|W<|ly%H~arF=h3{0137(s*D<sW1%cgt~RU2%dhMtEni(70S zAmNiEFT4pj7;?JHdb3ka%u1p+FjpJS3SSA(aXrH~%tT#J>Vies_b^wntA_RWlmQ=R zN~g`J_t+s-ekhXjbG;q6W>jQm1_uK#8;T+}9^Q?W&Pwnsu=b}?LbB1Au3g4vnV9PU zo~R5(vj^b=7X*QiBn~>P{Wvf+fMqWRyBz!z>_pc$73DKLo9!3oJM=Am>7vA*{olYP z#EJdbc)A?)Y<5?)c8Uk>UP`xn&XVsqJ|)Gqe21UBOFSU?oWYXz4n#tx^h?=<20?>2-j( zs{<_~=gsHQY6w6|MIbobzjh$Mo(QUZdBb?X3TY<3_u4?z|KzSojp?~Us&`cV$;rg^ zA0k4uEieqoQRTSqUh_fc@R7CWY03cNhS@XlxbBX-YW1shuEzl^f^s+{eriH)C35>k zw-?>p7Ar&9_{^pHM^#1?CU99nNvk@n>0&GpP6>se>Kj*YrF#Y?8LZU_dLXcB6sW5+ zE1a8xQKbH|wFJOsg;*U{bg%nk8k%Vy{j#3pX=rPtVsRKAmU>LT?tQ=?F2{1myXq>< zs2fQ6wfHz?V9BeJc6l3c=5$H>7*44^RUar8wL|s)&Qvx3dhly|rFU-fZGzMqPzwwJ zTC2fUfx4!fc6AthT`l_K4>VIP?w^jhr213?zyzfiJwBD3z3IH(bWl!xhO?wha?Zlc z2aRogq-8jF>ogiff(f@{&_p-PP4IeAvNu>?>lO5p7iJWa}%-#>KJVg?gjRP;;RvGeCFp+qZzn z(wD7HA>)PpAH|HqD|^1rJn^Ha$Xb~(x-?N+10BGu2LSio??}Vu-riLU+qeJqSoNNV zjNs_#=n=`O)2|9_@13#W91kwQN~dc#ZCvJDnL#t4LWnEa$7Sm#iOxd>)^yv#yu8z+ z-I7dM4z}I@uin9M%71QbgsSYG^Du;UiUS<&`&%r~3$Mr@xo8@q zN|Fw+z8YmlgU+dDXe#tif88t@!@-3{yB%C5nnxdIn2tu-Sqf?uRn#^yt0Hc?L?~vW z=nMA($ro^3uc!(|lO6u2gRHUUJ6)y@cYHpLlpgvUco-#T>4r;36RrD29oxk`)tT zozcMe^2qY4yV9K2Hq#RBDgJOFST}q2X;ROT`OfIEhSf}~*H$XeYs^JG)d@J4k*WwP zYWZ7jP6k+4Rg2b?YL}NI|3TFt)so<)DAt)u_hHJlt0v>r`32Qa4R#Y7R<4tetD0cv zQ(U+16Wp8~NF!f=~5zSYmfBt&5G@lanes4T**g<|it^Pf7WBX{A zB(oL35B5SJC}iqnixCPHa{fMrF*c2d?|Gqzpzh#}zKF}Qg{-_vE zI_ofSmU_5RtD3H9X6l*%s!<^}Rv#fYaF-#T7OZf0hpdiQ2+Cc&GEgi%qQp8slpDl( zwT*fEqna;=rB`2+ZO&5gYI$8kwBy?sf(fKaOb)fU1#m@-2O<4vE2`w!CV&Z<`O4Kw z*w|n^TxLWbwDp`@iWH^k5*yE1vadzKa9#y*XnAbxnil*nFx;?G|Gq`?A_%Z<$O;Lf zpkdW|V21aQl_CrW3yFWT!0EUMKo>IwCn4PHpx3;8jsZ}~1AgCUj71_So;2w2Y4$Y~ z42L0zytdiw#DnmiG8*3u3X}4rDP+K}TNdnV5b>-G)p8m{uGKGIcqI#+HNe~xFZ?1K zo%KSGY&)`+SqRR`OGrrl(B~En!|kH4=>o?*A_cvF!a<}|M}JsYh$a}KbPTq%9%9sb zmCU$gSxl@w2*auJAnssR2wR1u-_`>S*NCE9fP51tG2yaH#P-soFkCEsZ6H@u;koaz zXhgrbDD4D6q%n!%(q3u?!}?$V4Eq`zL5cU}KJUZMQ8jtH7DCkfzWCI*2IVnKPl+H zCLKg7s=1{~Fq{>qchDPV9&Dj*tfF!Zevy51)onHOdWp9!DzU+kz@PND)xtx~q 
z3kT0nlIC1ft^qJR3&WMs*RtY@TcYvs`~jx5Y(L{!=qesGmm`fKvO>1hNjJJ>G(KJN zsrLUJwzhe##F~%(*^_TV5V_D+km29-<4TDuV5F>q>PTPs-Ya?WD@2YETil|9gU43u zkze|-4<>+hMI=5sq_`-3p`e9JDyR>A*#}d=y3n_AM{L-O_6SPLuzV|lmciGA$M!S3 z2;$=6k5*Pzk^+rd_op&0-Gjcr$lnLl1GpBsUVgtqHqbJ`=KPItZ1*?~>wH6BtI@{B zdSs)8A5~DJM%Hu;L_GrtA`j^cDkMmcjK&>Z{lqAEEWiPnx3~9xod9wi<5EZ&kvbu= z=WQIXtx}>A2j9-fSN;dPIgt!*{T*7=Lo8Wy;_q~v=xfU;R!IFB(B#qhJz+}324@Y@ zH*phe-I+}{DIxMrAMW(4x}`%`%qMKpt3wkQm&T=&&hy=*M9FD1Xr&K(Itl?bsL><4 z`A~9k@KK!8QXOw|!T=^>IKVVP<_g!Rfmi|ro_ImsKP{`hzy<6zIyZOo< z&$+QgYYUMCzYr{GN^?BGm;4=e6}vn(#_I49HzW@$q_Br9_AtsalWN_%o?B zFd!fx?7S#D3q5>)vh~+Vb|t5!5&=yfmLHJ7#@@+7YjWiL%)2Acof0g@ZD0M&WH;fm zcF1@qfNY`jHf~{%(S-u^N8|?#u(3RAkh?!Cob55R8_3<^T*mnKyu3V3MV<}FU6z0| zp)csliIN@aIJAQyQOD%A&^Z>S=b9td7gy)X3eQZ*%weBefPT_ z$|%q@6eW+0$Y*7*wu~a6SH7m2DK((%Z-ZKSe}2rTpmD0{Rc_bBL{g49=K%ZBDWxMV zdQe#pZ(LNoutOHO^)j#x_4dK7326J&eC6Qwf{eNqv3nem6boR}v0@{wv(ff*yo&`89g_rm!DJW>dkDLLy%do_1$PpZz#!1dzA7oe?d?!(o^ObW))=pTaSL3XlmKr!s&vJgooK|qpOwU*LPXUTk zY$^geP@^|>oY>&0Mxxx)yM>LNQYTSZoy5kFC`vvUb5&JBj-`(afr-qe)#WNLdma&!J!m3n87q^!+8-@0>bWCcJZDV4a?Z)g`b z@@OX7=hPChabWkKR<9P2bdIW|zC6Xhank9rv9Xh{KKcM*3<`hE@oc;3#VSjq&a5)@ zcGRXtQqOXpTsZ`@`YwU*!;d61m~8+#EC*FQYaks_!4U|hb0NWHhyC*Znz z#UXnK{t_k6yc}lw4ed6gpnWc-yp7Y)&`93>F&jOLu{Oc-HjCt7A}&6@(B4XkaCZs? 
zNw3*irCA`^!q9Oa@)N`Qtb%)g8qWkvUl=$bN?!CNa_^#M)4-UUx{sg@sM$;q=H?%w zIshlBFNbH-RFAxpUiSe0b07t#Vb_g~L>l|wSyO9xkbZ`BF9Q1NZgGX(W|4ORsjq@% zg1y|Ez&Wuaz8K`4$Z!eQAoaa;G{Hjl>Vibc%H_t+`z*K<6KKKW2$%@gKB(?kL4&4Z zDjMXz&yNQIDnlj?4)I9KR|awp7E6YB%%EDGZo-&YGiJ1Ug##BUcK)F!34|s zk<4lFK|tg6Z#q>KpRh!bj?s+&2FzNjk&cxc6961`ODJl>A$UP|`yd`9kd0IweAiX| zeQYPL{dhQpSjf0E7uz3}*wc6FW_(*uaC_{IxXN?Kc?#O?>y9UH{9q#69&A8!*QIRq zgc2Jf_wJS@KV}F44Sj2~6ZDu`Jp5by@K=5lIq=2}eL??*Z_CS@>%qWnTZ$ci>dz(G zo^KOuu~BB`GUlq2Mm~-*>b4|6PA({sukz;(fmVp}rn)Nh^T2ICHWR H<97Q$1L9&| literal 0 HcmV?d00001 diff --git a/Blastproof/bootanim/stb_image.h b/Blastproof/bootanim/stb_image.h new file mode 100644 index 0000000..b536203 --- /dev/null +++ b/Blastproof/bootanim/stb_image.h @@ -0,0 +1,7988 @@ +/* stb_image - v2.30 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. 
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +* + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.30 (2024-05-31) avoid erroneous gcc warning + 2.29 (2023-05-xx) optimizations + 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. 
Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + Jacko Dirks + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data); +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. 
if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. 
Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. 
Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+//
+// STBI_NO_JPEG
+// STBI_NO_PNG
+// STBI_NO_BMP
+// STBI_NO_PSD
+// STBI_NO_TGA
+// STBI_NO_GIF
+// STBI_NO_HDR
+// STBI_NO_PIC
+// STBI_NO_PNM (.ppm and .pgm)
+//
+// - You can request *only* certain decoders and suppress all other ones
+// (this will be more forward-compatible, as addition of new decoders
+// doesn't require you to disable them explicitly):
+//
+// STBI_ONLY_JPEG
+// STBI_ONLY_PNG
+// STBI_ONLY_BMP
+// STBI_ONLY_PSD
+// STBI_ONLY_TGA
+// STBI_ONLY_GIF
+// STBI_ONLY_HDR
+// STBI_ONLY_PIC
+// STBI_ONLY_PNM (.ppm and .pgm)
+//
+// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
+// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
+//
+// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater
+// than that size (in either width or height) without further processing.
+// This is to let programs in the wild set an upper bound to prevent
+// denial-of-service attacks on untrusted data, as one could generate a
+// valid image of gigantic dimensions and force stb_image to allocate a
+// huge block of memory and spend disproportionate time decoding it. By
+// default this is set to (1 << 24), which is 16777216, but that's still
+// very big.
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#define STBI_VERSION 1
+
+enum
+{
+ STBI_default = 0, // only used for desired_channels
+
+ STBI_grey = 1,
+ STBI_grey_alpha = 2,
+ STBI_rgb = 3,
+ STBI_rgb_alpha = 4
+};
+
+#include <stdlib.h>
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef STBIDEF
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+ int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes.
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF 
float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, 
int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+//
+//
+//// end header file /////////////////////////////////////////////////////
+#endif // STBI_INCLUDE_STB_IMAGE_H
+
+#ifdef STB_IMAGE_IMPLEMENTATION
+
+#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
+ || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
+ || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+ || defined(STBI_ONLY_ZLIB)
+ #ifndef STBI_ONLY_JPEG
+ #define STBI_NO_JPEG
+ #endif
+ #ifndef STBI_ONLY_PNG
+ #define STBI_NO_PNG
+ #endif
+ #ifndef STBI_ONLY_BMP
+ #define STBI_NO_BMP
+ #endif
+ #ifndef STBI_ONLY_PSD
+ #define STBI_NO_PSD
+ #endif
+ #ifndef STBI_ONLY_TGA
+ #define STBI_NO_TGA
+ #endif
+ #ifndef STBI_ONLY_GIF
+ #define STBI_NO_GIF
+ #endif
+ #ifndef STBI_ONLY_HDR
+ #define STBI_NO_HDR
+ #endif
+ #ifndef STBI_ONLY_PIC
+ #define STBI_NO_PIC
+ #endif
+ #ifndef STBI_ONLY_PNM
+ #define STBI_NO_PNM
+ #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h> // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+#ifdef __cplusplus
+#define STBI_EXTERN extern "C"
+#else
+#define STBI_EXTERN extern
+#endif
+
+
+#ifndef _MSC_VER
+ #ifdef __cplusplus
+ #define stbi_inline inline
+ #else
+ #define stbi_inline
+ #endif
+#else
+ #define stbi_inline __forceinline
+#endif
+
+#ifndef STBI_NO_THREAD_LOCALS
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #define STBI_THREAD_LOCAL thread_local
+ #elif defined(__GNUC__) && __GNUC__ < 5
+ #define STBI_THREAD_LOCAL __thread
+ #elif defined(_MSC_VER)
+ #define STBI_THREAD_LOCAL __declspec(thread)
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__)
+ #define STBI_THREAD_LOCAL _Thread_local
+ #endif
+
+ #ifndef STBI_THREAD_LOCAL
+ #if defined(__GNUC__)
+ #define STBI_THREAD_LOCAL __thread
+ #endif
+ #endif
+#endif
+
+#if defined(_MSC_VER) || defined(__SYMBIAN32__)
+typedef unsigned short stbi__uint16;
+typedef signed short stbi__int16;
+typedef unsigned int stbi__uint32;
+typedef signed int stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v) (void)(v)
+#else
+#define STBI_NOTUSED(v) (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+ #define stbi_lrot(x,y) _lrotl(x,y)
+#else
+ #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31)))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. 
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400 // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+ int info[4];
+ __cpuid(info,1);
+ return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+ int res;
+ __asm {
+ mov eax,1
+ cpuid
+ mov res,edx
+ }
+ return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
+static int stbi__sse2_available(void)
+{
+ int info3 = stbi__cpuid3();
+ return ((info3 >> 26) & 1) != 0;
+}
+#endif
+
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
+static int stbi__sse2_available(void)
+{
+ // If we're even attempting to compile this on GCC/Clang, that means
+ // -msse2 is on, which means the compiler is allowed to use SSE2
+ // instructions at will, and so are we.
+ return 1;
+}
+#endif
+
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+#ifdef _MSC_VER
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+#else
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+#ifndef STBI_MAX_DIMENSIONS
+#define STBI_MAX_DIMENSIONS (1 << 24)
+#endif
+
+///////////////////////////////////////////////
+//
+// stbi__context struct and start_xxx functions
+
+// stbi__context structure is our basic context used by all images, so it
+// contains all the IO context, plus some basic image information
+typedef struct
+{
+ stbi__uint32 img_x, img_y;
+ int img_n, img_out_n;
+
+ stbi_io_callbacks io;
+ void *io_user_data;
+
+ int read_from_callbacks;
+ int buflen;
+ stbi_uc buffer_start[128];
+ int callback_already_read;
+
+ stbi_uc *img_buffer, *img_buffer_end;
+ stbi_uc *img_buffer_original, *img_buffer_original_end;
+} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+ s->io.read = NULL;
+ s->read_from_callbacks = 0;
+ s->callback_already_read = 0;
+ s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+ s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
+// initialize a callback-based context
+static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
+{
+ s->io = *c;
+ s->io_user_data = user;
+ s->buflen = sizeof(s->buffer_start);
+ s->read_from_callbacks = 1;
+ s->callback_already_read = 0;
+ s->img_buffer = s->img_buffer_original = s->buffer_start;
+ stbi__refill_buffer(s);
+ s->img_buffer_original_end = s->img_buffer_end;
+}
+
+#ifndef STBI_NO_STDIO
+
+static int stbi__stdio_read(void *user, char *data, int size)
+{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + int ch; + fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, 
int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); +#endif + +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char 
*stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
+static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// returns 1 if the sum of two signed ints is valid (between -2^31 
and 2^31-1 inclusive), 0 on overflow. +static int stbi__addints_valid(int a, int b) +{ + if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; +} + +// returns 1 if the product of two ints fits in a signed short, 0 on overflow. +static int stbi__mul2shorts_valid(int a, int b) +{ + if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid + if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF 
void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return 
stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? 
bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = 
_wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us 
*stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of 
file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && 
defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + 
STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp 
components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define 
stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // 
weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) { + for (j=0; j < count[i]; ++j) { + h->size[k++] = (stbi_uc) (i+1); + if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); + } + } + h->size[k] = 0; + + // compute 
actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if(c < 0 || c >= 256) // symbol id out of bounds! 
+ return -1; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & (sgn - 1)); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = 
rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * (1 << j->succ_low)); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += 
(r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * (1 << shift)); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & 
bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 
4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". 
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + 
#define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = 
                                             {'A','d','o','b','e','\0'};
         int ok = 1;
         int i;
         for (i=0; i < 6; ++i)
            if (stbi__get8(z->s) != tag[i])
               ok = 0;
         L -= 6;
         if (ok) {
            stbi__get8(z->s); // version
            stbi__get16be(z->s); // flags0
            stbi__get16be(z->s); // flags1
            z->app14_color_transform = stbi__get8(z->s); // color transform
            L -= 6;
         }
      }

      stbi__skip(z->s, L);
      return 1;
   }

   return stbi__err("unknown marker","Corrupt JPEG");
}

// after we see SOS
// Parse an SOS (start-of-scan) segment: reads the component count, maps each
// scan component id back to a frame component (filling z->order[]), records the
// DC/AC huffman table selectors per component, and reads the spectral selection
// and successive approximation fields used by progressive scans.
// Returns 1 on success, 0 on corrupt data (stbi__err sets the failure reason).
static int stbi__process_scan_header(stbi__jpeg *z)
{
   int i;
   int Ls = stbi__get16be(z->s);
   z->scan_n = stbi__get8(z->s);
   if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG");
   if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG");
   for (i=0; i < z->scan_n; ++i) {
      int id = stbi__get8(z->s), which;
      int q = stbi__get8(z->s);
      // find the frame component with a matching id
      for (which = 0; which < z->s->img_n; ++which)
         if (z->img_comp[which].id == id)
            break;
      if (which == z->s->img_n) return 0; // no match
      z->img_comp[which].hd = q >> 4;   if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG");
      z->img_comp[which].ha = q & 15;   if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG");
      z->order[i] = which;
   }

   {
      int aa;
      z->spec_start = stbi__get8(z->s);
      z->spec_end   = stbi__get8(z->s); // should be 63, but might be 0
      aa = stbi__get8(z->s);
      z->succ_high = (aa >> 4);
      z->succ_low  = (aa & 15);
      if (z->progressive) {
         if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13)
            return stbi__err("bad SOS", "Corrupt JPEG");
      } else {
         // baseline: spectral selection / successive approximation must be trivial
         if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG");
         if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG");
         z->spec_end = 63;
      }
   }

   return 1;
}

// Free the per-component decode buffers (raw_data, raw_coeff, linebuf) for the
// first ncomp components, and return 'why' unchanged so callers can write
// `return stbi__free_jpeg_components(z, n, stbi__err(...));` on failure paths.
static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why)
{
   int i;
   for (i=0; i < ncomp; ++i) {
      if (z->img_comp[i].raw_data) {
         STBI_FREE(z->img_comp[i].raw_data);
         z->img_comp[i].raw_data = NULL;
         z->img_comp[i].data = NULL;
      }
      if (z->img_comp[i].raw_coeff) {
         STBI_FREE(z->img_comp[i].raw_coeff);
         z->img_comp[i].raw_coeff = 0;
         z->img_comp[i].coeff = 0;
      }
      if (z->img_comp[i].linebuf) {
         STBI_FREE(z->img_comp[i].linebuf);
         z->img_comp[i].linebuf = NULL;
      }
   }
   return why;
}

// Parse an SOF (start-of-frame) segment: image dimensions, bit depth (8 only),
// component count, and per-component sampling factors / quant table ids.
// When scan == STBI__SCAN_load it also computes MCU geometry and allocates the
// (16-byte aligned) per-component pixel buffers, plus coefficient buffers for
// progressive files. Returns 1 on success, 0 on error.
static int stbi__process_frame_header(stbi__jpeg *z, int scan)
{
   stbi__context *s = z->s;
   int Lf,p,i,q, h_max=1,v_max=1,c;
   Lf = stbi__get16be(s);         if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG
   p  = stbi__get8(s);            if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline
   s->img_y = stbi__get16be(s);   if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG
   s->img_x = stbi__get16be(s);   if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires
   if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
   if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
   c = stbi__get8(s);
   if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG");
   s->img_n = c;
   for (i=0; i < c; ++i) {
      z->img_comp[i].data = NULL;
      z->img_comp[i].linebuf = NULL;
   }

   if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG");

   z->rgb = 0;
   for (i=0; i < s->img_n; ++i) {
      static const unsigned char rgb[3] = { 'R', 'G', 'B' };
      z->img_comp[i].id = stbi__get8(s);
      // some JPEGs store literal 'R','G','B' component ids: already RGB, not YCbCr
      if (s->img_n == 3 && z->img_comp[i].id == rgb[i])
         ++z->rgb;
      q = stbi__get8(s);
      z->img_comp[i].h = (q >> 4);   if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG");
      z->img_comp[i].v = q & 15;     if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG");
      z->img_comp[i].tq = stbi__get8(s);   if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG");
   }

   if (scan != STBI__SCAN_load) return 1;

   if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode");

   for (i=0; i < s->img_n; ++i) {
      if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
      if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
   }

   // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios
   // and I've never seen a non-corrupted JPEG file actually use them
   for (i=0; i < s->img_n; ++i) {
      if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG");
      if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG");
   }

   // compute interleaved mcu info
   z->img_h_max = h_max;
   z->img_v_max = v_max;
   z->img_mcu_w = h_max * 8;
   z->img_mcu_h = v_max * 8;
   // these sizes can't be more than 17 bits
   z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w;
   z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h;

   for (i=0; i < s->img_n; ++i) {
      // number of effective pixels (e.g. for non-interleaved MCU)
      z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max;
      z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max;
      // to simplify generation, we'll allocate enough memory to decode
      // the bogus oversized data from using interleaved MCUs and their
      // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
      // discard the extra data until colorspace conversion
      //
      // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier)
      // so these muls can't overflow with 32-bit ints (which we require)
      z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
      z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
      z->img_comp[i].coeff = 0;
      z->img_comp[i].raw_coeff = 0;
      z->img_comp[i].linebuf = NULL;
      z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15);
      if (z->img_comp[i].raw_data == NULL)
         return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
      // align blocks for idct using mmx/sse
      z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15);
      if (z->progressive) {
         // w2, h2 are multiples of 8 (see above)
         z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8;
         z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8;
         z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15);
         if (z->img_comp[i].raw_coeff == NULL)
            return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
         z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15);
      }
   }

   return 1;
}

// use comparisons since in some cases we handle more than one case (e.g. SOF)
// marker classification helpers (compare the raw marker byte)
#define stbi__DNL(x) ((x) == 0xdc)
#define stbi__SOI(x) ((x) == 0xd8)
#define stbi__EOI(x) ((x) == 0xd9)
#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
#define stbi__SOS(x) ((x) == 0xda)

#define stbi__SOF_progressive(x) ((x) == 0xc2)

// Parse markers from SOI up to and including SOF.
// scan selects how much work to do: STBI__SCAN_type only verifies the SOI
// signature; otherwise all intervening markers are processed and the frame
// header is parsed. Sets z->progressive from the SOF marker type.
static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan)
{
   int m;
   z->jfif = 0;
   z->app14_color_transform = -1; // valid values are 0,1,2
   z->marker = STBI__MARKER_none; // initialize cached marker to empty
   m = stbi__get_marker(z);
   if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG");
   if (scan == STBI__SCAN_type) return 1;
   m = stbi__get_marker(z);
   while (!stbi__SOF(m)) {
      if (!stbi__process_marker(z,m)) return 0;
      m = stbi__get_marker(z);
      while (m == STBI__MARKER_none) {
         // some files have extra padding after their blocks, so ok, we'll scan
         if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG");
         m = stbi__get_marker(z);
      }
   }
   z->progressive = stbi__SOF_progressive(m);
   if (!stbi__process_frame_header(z, scan)) return 0;
   return 1;
}

static stbi_uc stbi__skip_jpeg_junk_at_end(stbi__jpeg *j)
{
   // some JPEGs have junk at end, skip over it but if we find what looks
   // like a valid marker, resume there
   while (!stbi__at_eof(j->s)) {
      stbi_uc x = stbi__get8(j->s);
      while (x == 0xff) { // might be a marker
         if (stbi__at_eof(j->s)) return STBI__MARKER_none;
         x = stbi__get8(j->s);
         if (x != 0x00 && x != 0xff) {
            // not a stuffed zero or lead-in to another marker, looks
            // like an actual marker, return it
            return x;
         }
         // stuffed zero has x=0 now which ends the loop, meaning we go
         // back to regular scan loop.
         // repeated 0xff keeps trying to read the next byte of the marker.
      }
   }
   return STBI__MARKER_none;
}

// decode image to YCbCr format
// Main marker loop: parses the header, then processes SOS/DNL/other markers
// until EOI. For progressive files, finishes by dequantizing + IDCT'ing the
// accumulated coefficients.
static int stbi__decode_jpeg_image(stbi__jpeg *j)
{
   int m;
   for (m = 0; m < 4; m++) {
      j->img_comp[m].raw_data = NULL;
      j->img_comp[m].raw_coeff = NULL;
   }
   j->restart_interval = 0;
   if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0;
   m = stbi__get_marker(j);
   while (!stbi__EOI(m)) {
      if (stbi__SOS(m)) {
         if (!stbi__process_scan_header(j)) return 0;
         if (!stbi__parse_entropy_coded_data(j)) return 0;
         if (j->marker == STBI__MARKER_none ) {
            j->marker = stbi__skip_jpeg_junk_at_end(j);
            // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0
         }
         m = stbi__get_marker(j);
         if (STBI__RESTART(m))
            m = stbi__get_marker(j);
      } else if (stbi__DNL(m)) {
         int Ld = stbi__get16be(j->s);
         stbi__uint32 NL = stbi__get16be(j->s);
         if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG");
         if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG");
         m = stbi__get_marker(j);
      } else {
         // NOTE(review): a failed marker here returns 1 (success) rather than 0;
         // confirm this tolerant behavior is intentional before changing it.
         if (!stbi__process_marker(j, m)) return 1;
         m = stbi__get_marker(j);
      }
   }
   if (j->progressive)
      stbi__jpeg_finish(j);
   return 1;
}

// static jfif-centered resampling (across block boundaries)

typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1,
                                      int w, int hs);

#define stbi__div4(x) ((stbi_uc) ((x) >> 2))

// 1:1 resampler: no work needed, just return the input row.
static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
{
   STBI_NOTUSED(out);
   STBI_NOTUSED(in_far);
   STBI_NOTUSED(w);
   STBI_NOTUSED(hs);
   return in_near;
}

static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
{
   // need to generate two samples vertically for every one in input
   // weighted 3:1 average of the nearer and farther source rows
   int i;
   STBI_NOTUSED(hs);
   for (i=0; i < w; ++i)
      out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2);
   return out;
}

static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near,
                                       stbi_uc *in_far, int w, int hs)
{
   // need to generate two samples horizontally for every one in input
   int i;
   stbi_uc *input = in_near;

   if (w == 1) {
      // if only one sample, can't do any interpolation
      out[0] = out[1] = input[0];
      return out;
   }

   out[0] = input[0];
   out[1] = stbi__div4(input[0]*3 + input[1] + 2);
   for (i=1; i < w-1; ++i) {
      int n = 3*input[i]+2;
      out[i*2+0] = stbi__div4(n+input[i-1]);
      out[i*2+1] = stbi__div4(n+input[i+1]);
   }
   out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2);
   out[i*2+1] = input[w-1];

   STBI_NOTUSED(in_far);
   STBI_NOTUSED(hs);

   return out;
}

#define stbi__div16(x) ((stbi_uc) ((x) >> 4))

// 2x2 upsampler: bilinear interpolation both vertically (in_near/in_far, 3:1)
// and horizontally (neighboring samples, 3:1), scalar reference version.
static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
{
   // need to generate 2x2 samples for every one in input
   int i,t0,t1;
   if (w == 1) {
      out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
      return out;
   }

   t1 = 3*in_near[0] + in_far[0];
   out[0] = stbi__div4(t1+2);
   for (i=1; i < w; ++i) {
      t0 = t1;
      t1 = 3*in_near[i]+in_far[i];
      out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
      out[i*2  ] = stbi__div16(3*t1 + t0 + 8);
   }
   out[w*2-1] = stbi__div4(t1+2);

   STBI_NOTUSED(hs);

   return out;
}

#if defined(STBI_SSE2) || defined(STBI_NEON)
// SIMD version of stbi__resample_row_hv_2: processes 8 pixels per iteration,
// with scalar handling of the row boundaries. Must produce identical output to
// the scalar version.
static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
{
   // need to generate 2x2 samples for every one in input
   int i=0,t0,t1;

   if (w == 1) {
      out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
      return out;
   }

   t1 = 3*in_near[0] + in_far[0];
   // process groups of 8 pixels for as long as we can.
   // note we can't handle the last pixel in a row in this loop
   // because we need to handle the filter boundary conditions.
   for (; i < ((w-1) & ~7); i += 8) {
#if defined(STBI_SSE2)
      // load and perform the vertical filtering pass
      // this uses 3*x + y = 4*x + (y - x)
      __m128i zero = _mm_setzero_si128();
      __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i));
      __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i));
      __m128i farw = _mm_unpacklo_epi8(farb, zero);
      __m128i nearw = _mm_unpacklo_epi8(nearb, zero);
      __m128i diff = _mm_sub_epi16(farw, nearw);
      __m128i nears = _mm_slli_epi16(nearw, 2);
      __m128i curr = _mm_add_epi16(nears, diff); // current row

      // horizontal filter works the same based on shifted vers of current
      // row. "prev" is current row shifted right by 1 pixel; we need to
      // insert the previous pixel value (from t1).
      // "next" is current row shifted left by 1 pixel, with first pixel
      // of next block of 8 pixels added in.
      __m128i prv0 = _mm_slli_si128(curr, 2);
      __m128i nxt0 = _mm_srli_si128(curr, 2);
      __m128i prev = _mm_insert_epi16(prv0, t1, 0);
      __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7);

      // horizontal filter, polyphase implementation since it's convenient:
      // even pixels = 3*cur + prev = cur*4 + (prev - cur)
      // odd  pixels = 3*cur + next = cur*4 + (next - cur)
      // note the shared term.
      __m128i bias = _mm_set1_epi16(8);
      __m128i curs = _mm_slli_epi16(curr, 2);
      __m128i prvd = _mm_sub_epi16(prev, curr);
      __m128i nxtd = _mm_sub_epi16(next, curr);
      __m128i curb = _mm_add_epi16(curs, bias);
      __m128i even = _mm_add_epi16(prvd, curb);
      __m128i odd = _mm_add_epi16(nxtd, curb);

      // interleave even and odd pixels, then undo scaling.
      __m128i int0 = _mm_unpacklo_epi16(even, odd);
      __m128i int1 = _mm_unpackhi_epi16(even, odd);
      __m128i de0 = _mm_srli_epi16(int0, 4);
      __m128i de1 = _mm_srli_epi16(int1, 4);

      // pack and write output
      __m128i outv = _mm_packus_epi16(de0, de1);
      _mm_storeu_si128((__m128i *) (out + i*2), outv);
#elif defined(STBI_NEON)
      // load and perform the vertical filtering pass
      // this uses 3*x + y = 4*x + (y - x)
      uint8x8_t farb = vld1_u8(in_far + i);
      uint8x8_t nearb = vld1_u8(in_near + i);
      int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
      int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
      int16x8_t curr = vaddq_s16(nears, diff); // current row

      // horizontal filter works the same based on shifted vers of current
      // row. "prev" is current row shifted right by 1 pixel; we need to
      // insert the previous pixel value (from t1).
      // "next" is current row shifted left by 1 pixel, with first pixel
      // of next block of 8 pixels added in.
      int16x8_t prv0 = vextq_s16(curr, curr, 7);
      int16x8_t nxt0 = vextq_s16(curr, curr, 1);
      int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
      int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7);

      // horizontal filter, polyphase implementation since it's convenient:
      // even pixels = 3*cur + prev = cur*4 + (prev - cur)
      // odd  pixels = 3*cur + next = cur*4 + (next - cur)
      // note the shared term.
      int16x8_t curs = vshlq_n_s16(curr, 2);
      int16x8_t prvd = vsubq_s16(prev, curr);
      int16x8_t nxtd = vsubq_s16(next, curr);
      int16x8_t even = vaddq_s16(curs, prvd);
      int16x8_t odd = vaddq_s16(curs, nxtd);

      // undo scaling and round, then store with even/odd phases interleaved
      uint8x8x2_t o;
      o.val[0] = vqrshrun_n_s16(even, 4);
      o.val[1] = vqrshrun_n_s16(odd,  4);
      vst2_u8(out + i*2, o);
#endif

      // "previous" value for next iter
      t1 = 3*in_near[i+7] + in_far[i+7];
   }

   t0 = t1;
   t1 = 3*in_near[i] + in_far[i];
   out[i*2] = stbi__div16(3*t1 + t0 + 8);

   for (++i; i < w; ++i) {
      t0 = t1;
      t1 = 3*in_near[i]+in_far[i];
      out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
      out[i*2  ] = stbi__div16(3*t1 + t0 + 8);
   }
   out[w*2-1] = stbi__div4(t1+2);

   STBI_NOTUSED(hs);

   return out;
}
#endif

// Fallback for arbitrary integer upsampling factors (hs-times horizontally).
static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
{
   // resample with nearest-neighbor
   int i,j;
   STBI_NOTUSED(in_far);
   for (i=0; i < w; ++i)
      for (j=0; j < hs; ++j)
         out[i*hs+j] = in_near[i];
   return out;
}

// this is a reduced-precision calculation of YCbCr-to-RGB introduced
// to make sure the code produces the same results in both SIMD and scalar
#define stbi__float2fixed(x)  (((int) ((x) * 4096.0f + 0.5f)) << 8)
static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step)
{
   int i;
   for (i=0; i < count; ++i) {
      int y_fixed = (y[i] << 20) + (1<<19); // rounding
      int r,g,b;
      int cr = pcr[i] - 128;
      int cb = pcb[i] - 128;
      r = y_fixed +  cr* stbi__float2fixed(1.40200f);
      g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
      b = y_fixed                                     +   cb* stbi__float2fixed(1.77200f);
      r >>= 20;
      g >>= 20;
      b >>= 20;
      // clamp to 0..255 (unsigned compare catches both underflow and overflow)
      if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
      if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
      if ((unsigned) b > 255) { if (b < 0) b =
0; else b = 255; }
      out[0] = (stbi_uc)r;
      out[1] = (stbi_uc)g;
      out[2] = (stbi_uc)b;
      out[3] = 255;
      out += step;
   }
}

#if defined(STBI_SSE2) || defined(STBI_NEON)
// SIMD YCbCr->RGBA conversion. Only the step==4 (RGBA output) path is
// vectorized; remaining pixels (and step!=4) fall through to the scalar tail,
// which matches stbi__YCbCr_to_RGB_row exactly.
static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step)
{
   int i = 0;

#ifdef STBI_SSE2
   // step == 3 is pretty ugly on the final interleave, and i'm not convinced
   // it's useful in practice (you wouldn't use it for textures, for example).
   // so just accelerate step == 4 case.
   if (step == 4) {
      // this is a fairly straightforward implementation and not super-optimized.
      __m128i signflip  = _mm_set1_epi8(-0x80);
      __m128i cr_const0 = _mm_set1_epi16(   (short) ( 1.40200f*4096.0f+0.5f));
      __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f));
      __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f));
      __m128i cb_const1 = _mm_set1_epi16(   (short) ( 1.77200f*4096.0f+0.5f));
      __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128);
      __m128i xw = _mm_set1_epi16(255); // alpha channel

      for (; i+7 < count; i += 8) {
         // load
         __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i));
         __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i));
         __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i));
         __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128
         __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128

         // unpack to short (and left-shift cr, cb by 8)
         __m128i yw  = _mm_unpacklo_epi8(y_bias, y_bytes);
         __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
         __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);

         // color transform
         __m128i yws = _mm_srli_epi16(yw, 4);
         __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
         __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
         __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
         __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
         __m128i rws = _mm_add_epi16(cr0, yws);
         __m128i gwt = _mm_add_epi16(cb0, yws);
         __m128i bws = _mm_add_epi16(yws, cb1);
         __m128i gws = _mm_add_epi16(gwt, cr1);

         // descale
         __m128i rw = _mm_srai_epi16(rws, 4);
         __m128i bw = _mm_srai_epi16(bws, 4);
         __m128i gw = _mm_srai_epi16(gws, 4);

         // back to byte, set up for transpose
         __m128i brb = _mm_packus_epi16(rw, bw);
         __m128i gxb = _mm_packus_epi16(gw, xw);

         // transpose to interleave channels
         __m128i t0 = _mm_unpacklo_epi8(brb, gxb);
         __m128i t1 = _mm_unpackhi_epi8(brb, gxb);
         __m128i o0 = _mm_unpacklo_epi16(t0, t1);
         __m128i o1 = _mm_unpackhi_epi16(t0, t1);

         // store
         _mm_storeu_si128((__m128i *) (out + 0), o0);
         _mm_storeu_si128((__m128i *) (out + 16), o1);
         out += 32;
      }
   }
#endif

#ifdef STBI_NEON
   // in this version, step=3 support would be easy to add. but is there demand?
   if (step == 4) {
      // this is a fairly straightforward implementation and not super-optimized.
      uint8x8_t signflip = vdup_n_u8(0x80);
      int16x8_t cr_const0 = vdupq_n_s16(   (short) ( 1.40200f*4096.0f+0.5f));
      int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f));
      int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f));
      int16x8_t cb_const1 = vdupq_n_s16(   (short) ( 1.77200f*4096.0f+0.5f));

      for (; i+7 < count; i += 8) {
         // load
         uint8x8_t y_bytes = vld1_u8(y + i);
         uint8x8_t cr_bytes = vld1_u8(pcr + i);
         uint8x8_t cb_bytes = vld1_u8(pcb + i);
         int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
         int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));

         // expand to s16
         int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
         int16x8_t crw = vshll_n_s8(cr_biased, 7);
         int16x8_t cbw = vshll_n_s8(cb_biased, 7);

         // color transform
         int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
         int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
         int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
         int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
         int16x8_t rws = vaddq_s16(yws, cr0);
         int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
         int16x8_t bws = vaddq_s16(yws, cb1);

         // undo scaling, round, convert to byte
         uint8x8x4_t o;
         o.val[0] = vqrshrun_n_s16(rws, 4);
         o.val[1] = vqrshrun_n_s16(gws, 4);
         o.val[2] = vqrshrun_n_s16(bws, 4);
         o.val[3] = vdup_n_u8(255);

         // store, interleaving r/g/b/a
         vst4_u8(out, o);
         out += 8*4;
      }
   }
#endif

   // scalar fallback / tail; identical to stbi__YCbCr_to_RGB_row
   for (; i < count; ++i) {
      int y_fixed = (y[i] << 20) + (1<<19); // rounding
      int r,g,b;
      int cr = pcr[i] - 128;
      int cb = pcb[i] - 128;
      r = y_fixed + cr* stbi__float2fixed(1.40200f);
      g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
      b = y_fixed + cb* stbi__float2fixed(1.77200f);
      r >>= 20;
      g >>= 20;
      b >>= 20;
      if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
      if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
      if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
      out[0] = (stbi_uc)r;
      out[1] = (stbi_uc)g;
      out[2] = (stbi_uc)b;
      out[3] = 255;
      out += step;
   }
}
#endif

// set up the kernels
// Selects scalar or SIMD implementations for IDCT, color conversion and 2x2
// resampling, depending on compile-time flags and runtime CPU detection (SSE2).
static void stbi__setup_jpeg(stbi__jpeg *j)
{
   j->idct_block_kernel = stbi__idct_block;
   j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row;
   j->resample_row_hv_2_kernel = stbi__resample_row_hv_2;

#ifdef STBI_SSE2
   if (stbi__sse2_available()) {
      j->idct_block_kernel = stbi__idct_simd;
      j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
      j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
   }
#endif

#ifdef STBI_NEON
   j->idct_block_kernel = stbi__idct_simd;
   j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
   j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
#endif
}

// clean up the temporary component buffers
static void stbi__cleanup_jpeg(stbi__jpeg *j)
{
   stbi__free_jpeg_components(j, j->s->img_n, 0);
}

// per-component resampling state used during the final output pass
typedef struct
{
   resample_row_func resample;
   stbi_uc *line0,*line1;
   int hs,vs;   // expansion factor in each axis
   int w_lores; // horizontal pixels pre-expansion
   int
       ystep;   // how far through vertical expansion we are
   int ypos;    // which pre-expansion row we're on
} stbi__resample;

// fast 0..255 * 0..255 => 0..255 rounded multiplication
static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y)
{
   unsigned int t = x*y + 128;
   return (stbi_uc) ((t + (t >>8)) >> 8);
}

// Decode a JPEG into an 8-bit interleaved pixel buffer with req_comp channels
// (or a default derived from the file if req_comp==0). Handles upsampling of
// chroma planes and conversion from YCbCr / RGB / CMYK / YCCK to the requested
// output format. Returns a malloc'd buffer (caller frees) or NULL on error.
static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp)
{
   int n, decode_n, is_rgb;
   z->s->img_n = 0; // make stbi__cleanup_jpeg safe

   // validate req_comp
   if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");

   // load a jpeg image from whichever source, but leave in YCbCr format
   if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; }

   // determine actual number of components to generate
   n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1;

   is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif));

   // grayscale output from a YCbCr image only needs the luma plane
   if (z->s->img_n == 3 && n < 3 && !is_rgb)
      decode_n = 1;
   else
      decode_n = z->s->img_n;

   // nothing to do if no components requested; check this now to avoid
   // accessing uninitialized coutput[0] later
   if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; }

   // resample and color-convert
   {
      int k;
      unsigned int i,j;
      stbi_uc *output;
      stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL };

      stbi__resample res_comp[4];

      for (k=0; k < decode_n; ++k) {
         stbi__resample *r = &res_comp[k];

         // allocate line buffer big enough for upsampling off the edges
         // with upsample factor of 4
         z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3);
         if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }

         r->hs      = z->img_h_max / z->img_comp[k].h;
         r->vs      = z->img_v_max / z->img_comp[k].v;
         r->ystep   = r->vs >> 1;
         r->w_lores = (z->s->img_x + r->hs-1) / r->hs;
         r->ypos    = 0;
         r->line0   = r->line1 = z->img_comp[k].data;

         if      (r->hs == 1 && r->vs == 1) r->resample = resample_row_1;
         else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2;
         else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2;
         else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel;
         else                               r->resample = stbi__resample_row_generic;
      }

      // can't error after this so, this is safe
      output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1);
      if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }

      // now go ahead and resample
      for (j=0; j < z->s->img_y; ++j) {
         stbi_uc *out = output + n * z->s->img_x * j;
         for (k=0; k < decode_n; ++k) {
            stbi__resample *r = &res_comp[k];
            int y_bot = r->ystep >= (r->vs >> 1);
            coutput[k] = r->resample(z->img_comp[k].linebuf,
                                     y_bot ? r->line1 : r->line0,
                                     y_bot ? r->line0 : r->line1,
                                     r->w_lores, r->hs);
            if (++r->ystep >= r->vs) {
               r->ystep = 0;
               r->line0 = r->line1;
               if (++r->ypos < z->img_comp[k].y)
                  r->line1 += z->img_comp[k].w2;
            }
         }
         if (n >= 3) {
            stbi_uc *y = coutput[0];
            if (z->s->img_n == 3) {
               if (is_rgb) {
                  for (i=0; i < z->s->img_x; ++i) {
                     out[0] = y[i];
                     out[1] = coutput[1][i];
                     out[2] = coutput[2][i];
                     out[3] = 255;
                     out += n;
                  }
               } else {
                  z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
               }
            } else if (z->s->img_n == 4) {
               if (z->app14_color_transform == 0) { // CMYK
                  for (i=0; i < z->s->img_x; ++i) {
                     stbi_uc m = coutput[3][i];
                     out[0] = stbi__blinn_8x8(coutput[0][i], m);
                     out[1] = stbi__blinn_8x8(coutput[1][i], m);
                     out[2] = stbi__blinn_8x8(coutput[2][i], m);
                     out[3] = 255;
                     out += n;
                  }
               } else if (z->app14_color_transform == 2) { // YCCK
                  z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
                  for (i=0; i < z->s->img_x; ++i) {
                     stbi_uc m = coutput[3][i];
                     out[0] = stbi__blinn_8x8(255 - out[0], m);
                     out[1] = stbi__blinn_8x8(255 - out[1], m);
                     out[2] = stbi__blinn_8x8(255 - out[2], m);
                     out += n;
                  }
               } else { // YCbCr + alpha?  Ignore the fourth channel for now
                  z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
               }
            } else
               for (i=0; i < z->s->img_x; ++i) {
                  out[0] = out[1] = out[2] = y[i];
                  out[3] = 255; // not used if n==3
                  out += n;
               }
         } else {
            if (is_rgb) {
               if (n == 1)
                  for (i=0; i < z->s->img_x; ++i)
                     *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
               else {
                  for (i=0; i < z->s->img_x; ++i, out += 2) {
                     out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
                     out[1] = 255;
                  }
               }
            } else if (z->s->img_n == 4 && z->app14_color_transform == 0) {
               for (i=0; i < z->s->img_x; ++i) {
                  stbi_uc m = coutput[3][i];
                  stbi_uc r = stbi__blinn_8x8(coutput[0][i], m);
                  stbi_uc g = stbi__blinn_8x8(coutput[1][i], m);
                  stbi_uc b = stbi__blinn_8x8(coutput[2][i], m);
                  out[0] = stbi__compute_y(r, g, b);
                  out[1] = 255;
                  out += n;
               }
            } else if (z->s->img_n == 4 && z->app14_color_transform == 2) {
               for (i=0; i < z->s->img_x; ++i) {
                  out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]);
                  out[1] = 255;
                  out += n;
               }
            } else {
               stbi_uc *y = coutput[0];
               if (n == 1)
                  for (i=0; i < z->s->img_x; ++i) out[i] = y[i];
               else
                  for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; }
            }
         }
      }
      stbi__cleanup_jpeg(z);
      *out_x = z->s->img_x;
      *out_y = z->s->img_y;
      if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output
      return output;
   }
}

// stbi format-dispatch entry point: allocate decoder state on the heap (it is
// large), run the full decode, and free the state.
static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   unsigned char* result;
   stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg));
   if (!j) return stbi__errpuc("outofmem", "Out of memory");
   memset(j, 0, sizeof(stbi__jpeg));
   STBI_NOTUSED(ri);
   j->s = s;
   stbi__setup_jpeg(j);
   result = load_jpeg_image(j, x,y,comp,req_comp);
   STBI_FREE(j);
   return result;
}

// Cheap signature test: just check for a valid SOI marker, then rewind.
static int stbi__jpeg_test(stbi__context *s)
{
   int r;
   stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
   if (!j) return stbi__err("outofmem", "Out of memory");
   memset(j, 0, sizeof(stbi__jpeg));
   j->s = s;
   stbi__setup_jpeg(j);
   r = stbi__decode_jpeg_header(j, STBI__SCAN_type);
   stbi__rewind(s);
   STBI_FREE(j);
   return r;
}

// Header-only query: parse up to SOF, report dimensions/components, rewind on failure.
static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp)
{
   if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) {
      stbi__rewind( j->s );
      return 0;
   }
   if (x) *x = j->s->img_x;
   if (y) *y = j->s->img_y;
   if (comp) *comp = j->s->img_n >= 3 ?
3 : 1;
   return 1;
}

static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp)
{
   int result;
   stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg)));
   if (!j) return stbi__err("outofmem", "Out of memory");
   memset(j, 0, sizeof(stbi__jpeg));
   j->s = s;
   result = stbi__jpeg_info_raw(j, x, y, comp);
   STBI_FREE(j);
   return result;
}
#endif

// public domain zlib decode v0.2 Sean Barrett 2006-11-18
// simple implementation
// - all input must be provided in an upfront buffer
// - all output is written to a single output buffer (can malloc/realloc)
// performance
// - fast huffman

#ifndef STBI_NO_ZLIB

// fast-way is faster to check than jpeg huffman, but slow way is slower
#define STBI__ZFAST_BITS  9 // accelerate all cases in default tables
#define STBI__ZFAST_MASK  ((1 << STBI__ZFAST_BITS) - 1)
#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet

// zlib-style huffman encoding
// (jpegs packs from left, zlib from right, so can't share code)
typedef struct
{
   stbi__uint16 fast[1 << STBI__ZFAST_BITS]; // direct lookup for codes <= 9 bits: (length<<9)|symbol
   stbi__uint16 firstcode[16];
   int maxcode[17];
   stbi__uint16 firstsymbol[16];
   stbi_uc size[STBI__ZNSYMS];
   stbi__uint16 value[STBI__ZNSYMS];
} stbi__zhuffman;

stbi_inline static int stbi__bitreverse16(int n)
{
   n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
   n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
   n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
   n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
   return n;
}

stbi_inline static int stbi__bit_reverse(int v, int bits)
{
   STBI_ASSERT(bits <= 16);
   // to bit reverse n bits, reverse 16 and shift
   // e.g. 11 bits, bit reverse and shift away 5
   return stbi__bitreverse16(v) >> (16-bits);
}

// Build canonical huffman decode tables (fast table + per-length first-code
// arrays) from a list of code lengths, per the DEFLATE spec.
static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num)
{
   int i,k=0;
   int code, next_code[16], sizes[17];

   // DEFLATE spec for generating codes
   memset(sizes, 0, sizeof(sizes));
   memset(z->fast, 0, sizeof(z->fast));
   for (i=0; i < num; ++i)
      ++sizes[sizelist[i]];
   sizes[0] = 0;
   for (i=1; i < 16; ++i)
      if (sizes[i] > (1 << i))
         return stbi__err("bad sizes", "Corrupt PNG");
   code = 0;
   for (i=1; i < 16; ++i) {
      next_code[i] = code;
      z->firstcode[i] = (stbi__uint16) code;
      z->firstsymbol[i] = (stbi__uint16) k;
      code = (code + sizes[i]);
      if (sizes[i])
         if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG");
      z->maxcode[i] = code << (16-i); // preshift for inner loop
      code <<= 1;
      k += sizes[i];
   }
   z->maxcode[16] = 0x10000; // sentinel
   for (i=0; i < num; ++i) {
      int s = sizelist[i];
      if (s) {
         int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
         stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i);
         z->size [c] = (stbi_uc     ) s;
         z->value[c] = (stbi__uint16) i;
         if (s <= STBI__ZFAST_BITS) {
            // fill every fast-table slot whose low s bits match this code
            int j = stbi__bit_reverse(next_code[s],s);
            while (j < (1 << STBI__ZFAST_BITS)) {
               z->fast[j] = fastv;
               j += (1 << s);
            }
         }
         ++next_code[s];
      }
   }
   return 1;
}

// zlib-from-memory implementation for PNG reading
//    because PNG allows splitting the zlib stream arbitrarily,
//    and it's annoying structurally to have PNG call ZLIB call PNG,
//    we require PNG read all the IDATs and combine them into a single
//    memory buffer

typedef struct
{
   stbi_uc *zbuffer, *zbuffer_end;
   int num_bits;
   int hit_zeof_once;   // set once 16 implicit zero bits have been injected at EOF
   stbi__uint32 code_buffer;

   char *zout;
   char *zout_start;
   char *zout_end;
   int   z_expandable;

   stbi__zhuffman z_length, z_distance;
} stbi__zbuf;

stbi_inline static int stbi__zeof(stbi__zbuf *z)
{
   return (z->zbuffer >= z->zbuffer_end);
}

stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z)
{
   // reads past EOF return 0 rather than failing hard
   return stbi__zeof(z) ? 0 : *z->zbuffer++;
}

static void stbi__fill_bits(stbi__zbuf *z)
{
   do {
      if (z->code_buffer >= (1U << z->num_bits)) {
         z->zbuffer = z->zbuffer_end;  /* treat this as EOF so we fail. */
         return;
      }
      z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits;
      z->num_bits += 8;
   } while (z->num_bits <= 24);
}

stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n)
{
   unsigned int k;
   if (z->num_bits < n) stbi__fill_bits(z);
   k = z->code_buffer & ((1 << n) - 1);
   z->code_buffer >>= n;
   z->num_bits -= n;
   return k;
}

static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z)
{
   int b,s,k;
   // not resolved by fast table, so compute it the slow way
   // use jpeg approach, which requires MSbits at top
   k = stbi__bit_reverse(a->code_buffer, 16);
   for (s=STBI__ZFAST_BITS+1; ; ++s)
      if (k < z->maxcode[s])
         break;
   if (s >= 16) return -1; // invalid code!
   // code size is s, so:
   b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s];
   if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere!
   if (z->size[b] != s) return -1;  // was originally an assert, but report failure instead.
   a->code_buffer >>= s;
   a->num_bits -= s;
   return z->value[b];
}

// Decode one huffman symbol from the bit stream; returns the symbol value or
// -1 on invalid/truncated data.
stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z)
{
   int b,s;
   if (a->num_bits < 16) {
      if (stbi__zeof(a)) {
         if (!a->hit_zeof_once) {
            // This is the first time we hit eof, insert 16 extra padding bits
            // to allow us to keep going; if we actually consume any of them
            // though, that is invalid data. This is caught later.
            a->hit_zeof_once = 1;
            a->num_bits += 16; // add 16 implicit zero bits
         } else {
            // We already inserted our extra 16 padding bits and are again
            // out, this stream is actually prematurely terminated.
            return -1;
         }
      } else {
         stbi__fill_bits(a);
      }
   }
   b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
   if (b) {
      s = b >> 9;
      a->code_buffer >>= s;
      a->num_bits -= s;
      return b & 511;
   }
   return stbi__zhuffman_decode_slowpath(a, z);
}

static int stbi__zexpand(stbi__zbuf *z, char *zout, int n)  // need to make room for n bytes
{
   char *q;
   unsigned int cur, limit, old_limit;
   z->zout = zout;
   if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG");
   cur   = (unsigned int) (z->zout - z->zout_start);
   limit = old_limit = (unsigned) (z->zout_end - z->zout_start);
   if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory");
   while (cur + n > limit) {
      if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory");
      limit *= 2;
   }
   q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
   STBI_NOTUSED(old_limit);
   if (q == NULL) return stbi__err("outofmem", "Out of memory");
   z->zout_start = q;
   z->zout       = q + cur;
   z->zout_end   = q + limit;
   return 1;
}

// DEFLATE length/distance base values and extra-bit counts (RFC 1951, 3.2.5)
static const int stbi__zlength_base[31] = {
   3,4,5,6,7,8,9,10,11,13,
   15,17,19,23,27,31,35,43,51,59,
   67,83,99,115,131,163,195,227,258,0,0 };

static const int stbi__zlength_extra[31]=
{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };

static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};

static const int stbi__zdist_extra[32] =
{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};

static int stbi__parse_huffman_block(stbi__zbuf *a)
{
   char *zout = a->zout;
   for(;;) {
      int z = stbi__zhuffman_decode(a, &a->z_length);
      if (z < 256) {
         if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes
         if (zout >= a->zout_end) {
            if (!stbi__zexpand(a, zout, 1)) return 0;
            zout = a->zout;
         }
         *zout++ = (char) z;
      } else {
         stbi_uc *p;
         int
len,dist; + if (z == 256) { + a->zout = zout; + if (a->hit_zeof_once && a->num_bits < 16) { + // The first time we hit zeof, we inserted 16 extra zero bits into our bit + // buffer so the decoder can just do its speculative decoding. But if we + // actually consumed any of those bits (which is the case when num_bits < 16), + // the stream actually read past the end so it is malformed. + return stbi__err("unexpected end","Corrupt PNG"); + } + return 1; + } + if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (len > a->zout_end - zout) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = 
(stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + a->hit_zeof_once = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + 
+static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if 
(outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filter used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_sub // Paeth with b=c=0 turns out to be equivalent to sub +}; + +static int stbi__paeth(int a, int b, int c) +{ + // This formulation looks very different from the reference in the PNG spec, but is + // actually 
equivalent and has favorable data dependencies and admits straightforward + // generation of branch-free code, which helps performance significantly. + int thresh = c*3 - (a + b); + int lo = a < b ? a : b; + int hi = a < b ? b : a; + int t0 = (hi <= thresh) ? lo : c; + int t1 = (thresh <= lo) ? hi : t0; + return t1; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// adds an extra all-255 alpha channel +// dest == src is legal +// img_n must be 1 or 3 +static void stbi__create_png_alpha_expand8(stbi_uc *dest, stbi_uc *src, stbi__uint32 x, int img_n) +{ + int i; + // must process data backwards since we allow dest==src + if (img_n == 1) { + for (i=x-1; i >= 0; --i) { + dest[i*2+1] = 255; + dest[i*2+0] = src[i]; + } + } else { + STBI_ASSERT(img_n == 3); + for (i=x-1; i >= 0; --i) { + dest[i*4+3] = 255; + dest[i*4+2] = src[i*3+2]; + dest[i*4+1] = src[i*3+1]; + dest[i*4+0] = src[i*3+0]; + } + } +} + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16 ? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + stbi_uc *filter_buf; + int all_ok = 1; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + // note: error exits here don't need to clean up a->out individually, + // stbi__do_png always does on error. 
+ if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + if (!stbi__mad2sizes_valid(img_width_bytes, y, img_width_bytes)) return stbi__err("too large", "Corrupt PNG"); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + // Allocate two scan lines worth of filter workspace buffer. + filter_buf = (stbi_uc *) stbi__malloc_mad2(img_width_bytes, 2, 0); + if (!filter_buf) return stbi__err("outofmem", "Out of memory"); + + // Filtering for low-bit-depth images + if (depth < 8) { + filter_bytes = 1; + width = img_width_bytes; + } + + for (j=0; j < y; ++j) { + // cur/prior filter buffers alternate + stbi_uc *cur = filter_buf + (j & 1)*img_width_bytes; + stbi_uc *prior = filter_buf + (~j & 1)*img_width_bytes; + stbi_uc *dest = a->out + stride*j; + int nk = width * filter_bytes; + int filter = *raw++; + + // check filter type + if (filter > 4) { + all_ok = stbi__err("invalid filter","Corrupt PNG"); + break; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // perform actual filtering + switch (filter) { + case STBI__F_none: + memcpy(cur, raw, nk); + break; + case STBI__F_sub: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); + break; + case STBI__F_up: + for (k = 0; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); + break; + case STBI__F_avg: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + 
cur[k-filter_bytes])>>1)); + break; + case STBI__F_paeth: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); // prior[k] == stbi__paeth(0,prior[k],0) + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes], prior[k], prior[k-filter_bytes])); + break; + case STBI__F_avg_first: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); + break; + } + + raw += nk; + + // expand decoded bits in cur to dest, also adding an extra alpha channel if desired + if (depth < 8) { + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + stbi_uc *in = cur; + stbi_uc *out = dest; + stbi_uc inb = 0; + stbi__uint32 nsmp = x*img_n; + + // expand bits to bytes first + if (depth == 4) { + for (i=0; i < nsmp; ++i) { + if ((i & 1) == 0) inb = *in++; + *out++ = scale * (inb >> 4); + inb <<= 4; + } + } else if (depth == 2) { + for (i=0; i < nsmp; ++i) { + if ((i & 3) == 0) inb = *in++; + *out++ = scale * (inb >> 6); + inb <<= 2; + } + } else { + STBI_ASSERT(depth == 1); + for (i=0; i < nsmp; ++i) { + if ((i & 7) == 0) inb = *in++; + *out++ = scale * (inb >> 7); + inb <<= 1; + } + } + + // insert alpha=255 values if desired + if (img_n != out_n) + stbi__create_png_alpha_expand8(dest, dest, x, img_n); + } else if (depth == 8) { + if (img_n == out_n) + memcpy(dest, cur, x*img_n); + else + stbi__create_png_alpha_expand8(dest, cur, x, img_n); + } else if (depth == 16) { + // convert the image data from big-endian to platform-native + stbi__uint16 *dest16 = (stbi__uint16*)dest; + stbi__uint32 nsmp = x*img_n; + + if (img_n == out_n) { + for (i = 0; i < nsmp; ++i, ++dest16, cur += 2) + *dest16 = (cur[0] << 8) | cur[1]; + } else { + STBI_ASSERT(img_n+1 == out_n); + if (img_n == 1) { + for (i = 0; i < x; ++i, dest16 += 2, cur += 2) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = 
0xffff; + } + } else { + STBI_ASSERT(img_n == 3); + for (i = 0; i < x; ++i, dest16 += 4, cur += 6) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = (cur[2] << 8) | cur[3]; + dest16[2] = (cur[4] << 8) | cur[5]; + dest16[3] = 0xffff; + } + } + } + } + } + + STBI_FREE(filter_buf); + if (!all_ok) return 0; + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute 
// between here and free(out) below, exiting would leak
flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_global = flag_true_if_should_convert; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__unpremultiply_on_load stbi__unpremultiply_on_load_global +#define stbi__de_iphone_flag stbi__de_iphone_flag_global +#else +static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; +static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; + +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; + stbi__unpremultiply_on_load_set = 1; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag_local = flag_true_if_should_convert; + stbi__de_iphone_flag_set = 1; +} + +#define stbi__unpremultiply_on_load (stbi__unpremultiply_on_load_set \ + ? stbi__unpremultiply_on_load_local \ + : stbi__unpremultiply_on_load_global) +#define stbi__de_iphone_flag (stbi__de_iphone_flag_set \ + ? 
stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) +#endif // STBI_THREAD_LOCAL + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return 
stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + } + // even with SCAN_header, have to scan to see if we have a tRNS + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. 
+ if (scan == STBI__SCAN_header) { ++s->img_n; return 1; } + if (z->depth == 16) { + for (k = 0; k < s->img_n && k < 3; ++k) // extra loop test to suppress false GCC warning + tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n && k < 3; ++k) + tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { + // header scan definitely stops at first IDAT + if (pal_img_n) + s->img_n = pal_img_n; + return 1; + } + if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + 
++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int 
req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1;/* >>= 1;*/ } + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + 
a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. +static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; + int extra_read; +} stbi__bmp_data; + +static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) +{ + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; + + if (compress == 0) { + if (info->bpp == 16) { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } else if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error +} + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = 
stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + stbi__bmp_set_mask_defaults(info, compress); + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + // V4/V5 header + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs + stbi__bmp_set_mask_defaults(info, compress); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - info.extra_read - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + // accept some number of extra bytes after the header, but if the offset points either to before + // the header ends or implies a large amount of 
extra data, reject the file as malformed + int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); + int header_limit = 1024; // max we actually read is below 256 bytes currently. + int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. + if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { + return stbi__errpuc("bad header", "Corrupt BMP"); + } + // we established that bytes_read_so_far is positive and sensible. + // the first half of this test rejects offsets that are either too small positives, or + // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn + // ensures the number computed in the second half of the test can't overflow. + if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } else { + stbi__skip(s, info.offset - bytes_read_so_far); + } + } + + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - info.extra_read - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, 
tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + STBI_NOTUSED(tga_x_origin); // @TODO + STBI_NOTUSED(tga_y_origin); // @TODO + + if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + if (tga_palette_len == 0) { /* you have to have at least one entry! 
*/ + STBI_FREE(tga_data); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + + // any data to skip? (offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. 
+ //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; ytype) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;xchannel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; ichannel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;ichannel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;ichannel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); 
//skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + if (!result) return stbi__errpuc("outofmem", "Out of memory"); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + 
pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = 
&g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return 
stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. 
+ } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, 
int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int 
*x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; 
+ // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= 
stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} 
+#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) { + stbi__rewind( s ); + return 0; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if 
(!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x 
= s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } + + if (req_comp && req_comp != s->img_n) { + if (ri->bits_per_channel == 16) { + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); + } else { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + if((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = 
&dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + if(*x == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + if (*y == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; + else + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + 
return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused 
functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 
'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from 
Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/Blastproof/config/bp_template.conf b/Blastproof/config/bp_template.conf new file mode 100644 index 0000000..6cb8088 --- /dev/null +++ b/Blastproof/config/bp_template.conf @@ -0,0 +1,48 @@ +# Blastproof config +# WARNING : DO NOT PUT ANY NON-ASCII CHAR IN THIS FILE + +# Serial port +# Define if the system is allowed to use the serial port +serial_port_enabled=true +# Define if system is allowed to push debug information on the serial port +serial_port_debuging=true +# Define if system is allowed to push errors on the serial port +serial_port_erroring=true + +# Graphics +# Define the font to use +font=bitra-ascii-medium.fbm +# Define whether or not the boot animation should be played +disable_boot_animation=true +# Set the following both to 0 use highest resolution available +# Define the screen default horizontal resolution +default_horizontal_resolution=1920 +# Define the screen default vertical resolution +default_vertical_resolution=1080 + +# InitFS +# Define the type GUID used for InitFS +# Warning : DO NOT modify this. It's common to all InitFS installation +initfs_partition_type_guid=8362b434-d825-11f0-a68f-10ffe08423a6 +# Define the InitFS partition GUID. 
Automatically generated by build script +initfs_partition_guid=UUID_GENERATION_NEEDED_INITFS +# Define the type GUID used SignSyst +# Warning : DO NOT modify this. It's common to all InitFS installation +signsyst_partition_type_guid=da0048b4-d826-11f0-b877-10ffe08423a6 +# Define the SignSyst partition GUID. Automatically generated by build script +signsyst_partition_guid=UUID_GENERATION_NEEDED_SIGNSYST + +# Shelter +# All the following elements are passed to the kernel +# Define the log level, value can range from 0 to 6 +kernel_log_level=0 +# Define wheither or not the kernel should test and benchmark all of his subsystems at boot time +kernel_test_benchmark=true +# Define the number of iterations for tests and benchmarks, must be between 0 and 10000 +kernel_bench_iterations=10000 +# Define if the log subsystem should output on the serial port +kernel_log_disable_serial_port=false +# Define if the serial port should be usable +kernel_disable_serial_port=false +# Define the size of the ring buffer for logging information. 
Value is in 4KB pages, ranging from 0 (ring logging disabled) to 65535 +kernel_log_ring_size=2048 diff --git a/Blastproof/fontgen/chars/0x21.png b/Blastproof/fontgen/chars/0x21.png new file mode 100644 index 0000000000000000000000000000000000000000..cbbab200f6c4068c957c5d8108f166b6d15c0787 GIT binary patch literal 105 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DFaUz$B>FS$$$R;w`UegNJ&Xa zNN^BNNm5|y(NgjgRE~NObl%F%&CTNruaY@0n=6CzRUT!&8=(n6y$qhNelF{r5}E+g C3L0(z literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x22.png b/Blastproof/fontgen/chars/0x22.png new file mode 100644 index 0000000000000000000000000000000000000000..5090a7802e3abf722f232b8dcc0d8afd1b3cbb37 GIT binary patch literal 105 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DFaUz$B>FS$$$R;w`T?eWo2a` y-O_y~Eg>NxNr5TAUr_l@(yj>eRT(q1nHaim@F-R?h$;j1GI+ZBxvXFS$$$R;w`T?eWo2a` z-O_y~Eg>NxNr6dVDMzB39IMOBBOdYX{Q36{g-R^`GEc6$$iy(IP>a*;PwfSuISihz KelF{r5}E+=w;u5T literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x24.png b/Blastproof/fontgen/chars/0x24.png new file mode 100644 index 0000000000000000000000000000000000000000..747fe3c7bc3d391aa6ff2422926484dfe91dbd38 GIT binary patch literal 158 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sZ>uF$B>FSYlALwF(`6m7ybW# zdUtzcWn8CY^@NAhbU9Vr85kz5y0_)x0UJhzOud`uK4$KkyQE@`(pn$?g=cppo%XS; zxE02g_wkPXjvBRXd(IVZ`gm7mW$1@LyOJJ1-@m{i`;zTt?&9R&kDo-sf%Y*lc)I$z JtaD0e0sx7oKI{Mh literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x25.png b/Blastproof/fontgen/chars/0x25.png new file mode 100644 index 0000000000000000000000000000000000000000..a4e4a8b694ea8b69a2e9a360bd0c10ca0d7cd296 GIT binary patch literal 159 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sWeX)$B>FSS0@Qc7FDX`4s>680jr+JbmZ>nvq`X&5@;q66xm5f-27eE^sJYD@< J);T3K0RTS=Im`e6 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x26.png b/Blastproof/fontgen/chars/0x26.png new file mode 100644 index 0000000000000000000000000000000000000000..5bc58876db9535d7e89b862b942a682dec1e5b16 GIT binary patch literal 150 
zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7saQ`J$B>FSdnX2R9WdZ`Be)vmrYwTtB>9 zzTR7pi+Abq;3?TlXL{>dFS$$$R;w`UegNJ&Xa vNN^BNNm5`6@E25`_0?osUdTm*a25t-KTbutuGPFSS0@+>F(_~xj{5&U zT_R>l$lRAFo(44tHcCxmIQMf?i_1dUi{dA;_N>f*`8v(%uOREIgTe~DWM4fULh?J literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x29.png b/Blastproof/fontgen/chars/0x29.png new file mode 100644 index 0000000000000000000000000000000000000000..3cdbf3fcc8e2b560fd217415dc988a0201b0b1a0 GIT binary patch literal 128 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DR)m7$B>FSS0@^BF&J<#PyYA+ zeAxu2Y@WGiHI54^DKn|7Fy5Jdrb#7`d*$I-Az=%{uFYn1+O@=Vs-oPAt6$XaPJS2v d-;a3#@47>xiZ@o=I0Q73!PC{xWt~$(69CYZD&PPB literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x2A.png b/Blastproof/fontgen/chars/0x2A.png new file mode 100644 index 0000000000000000000000000000000000000000..df6cd71b284a6c9bc9fa33202ccac3972f1763be GIT binary patch literal 134 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DPK<)$B>FSZ~Hv?7!){|x&Qw+ zmUB6j6eHCCjU_e1ONptWBZpyv&#?e||L|1pluE^uYj(A1g;w#(^L_mnSom#P?Q%Zz hqt_qleq6XcfZ4UsSoBwiS~1XI22WQ%mvv4FO#oAVDv|&I literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x2B.png b/Blastproof/fontgen/chars/0x2B.png new file mode 100644 index 0000000000000000000000000000000000000000..8c3aeea7c4ac985d1867852ca05a98d1b0f371bb GIT binary patch literal 114 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DN9cm$B>FS$$$R;w`W#7;1Y0D zAn?cu5b!X5Euq-hnaRrHkm6~`c`!YtMcQH3>=T>%Y9_NSR$>r7EhK-o{v*%~1_n=8 KKbLh*2~7a)u_L$u literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x2C.png b/Blastproof/fontgen/chars/0x2C.png new file mode 100644 index 0000000000000000000000000000000000000000..bcb64615dd6474d01a953e2fcdb49c40fca3149a GIT binary patch literal 102 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DP2z&$B>FS$$$R;w`W#7;1Y0j x#UjTA9TUMIX|loD1DQf;j}%W?x&2bJVi4owR4?M#y&kBM!PC{xWt~$(698S29~uAv literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x2D.png b/Blastproof/fontgen/chars/0x2D.png new file mode 100644 index 
0000000000000000000000000000000000000000..cd383f2412ffad001903b1dec6298db5d6d4897f GIT binary patch literal 97 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DGg5-$B>FS$$$R;w`W#7;1X~& vAnXX!LJ3L9jb6@79U7A*+?k6Fw3rxfUEox{Zx=HasE5JR)z4*}Q$iB}Evy-N literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x2E.png b/Blastproof/fontgen/chars/0x2E.png new file mode 100644 index 0000000000000000000000000000000000000000..bbce98319dee2c6f1765c693fb612e6618fa74c3 GIT binary patch literal 93 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DHTr_$B>FS$$$R;w`W#7;1Y0j o#UjTA9TUMoX0k+ksTKo6;UPBlCI3#D0M#>iy85}Sb4q9e04%Z_i~s-t literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x2F.png b/Blastproof/fontgen/chars/0x2F.png new file mode 100644 index 0000000000000000000000000000000000000000..5b89167bff0c463913b9311b5ba28c7cc8b08bd3 GIT binary patch literal 141 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sSr;W$B>FSS0{S%F(_~_AN>D+ zr<{k#!l!xbeI&%#6O`WNr7>LdaZwTBFSS0@_s9Z=w5e*5SD z`F9N>OD!B1MQ?6AznnvZlYwDUg}FS$$$R;w`b-tFfwwI zIMKwz!{akU%YaeXQ8d7HN6x_{Sv^spn9h%-7k#Drx;TFS2LhLwM?5Ypag<_lX%%KT XIZHzNE{l8=&_o7LS3j3^P6FSS0^|M9WdZvYX1M< z*s>&P>V+pU{$185I3^{FHoRFaH*JEUi_%Ov%{GBcHmkqK@G7$I7Jk1&i0u~V`z_D2 wC+s=p@GYR3@5uIBjPfz3>@TPPk~d-eU1lt?{KDKPKuZ`rUHx3vIVCg!0Jh*XIRF3v literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x33.png b/Blastproof/fontgen/chars/0x33.png new file mode 100644 index 0000000000000000000000000000000000000000..9463239f1cfd5d72afbe48163a62daa780019159 GIT binary patch literal 157 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sT5Ba$B>FSX9Ep|4j6DSt^fbu zNOsa@x3$+d`WY$+Ie9WLOgIwBcT-w9E@+auos{IHHC@-gttv205cS)=Ey()ro;bP0 Hl+XkK+iN)# literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x34.png b/Blastproof/fontgen/chars/0x34.png new file mode 100644 index 0000000000000000000000000000000000000000..79f1067f5aed27c973605fa7bd1ce6b0b9669fa3 GIT binary patch literal 138 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sX$K`$B>FSXD2%HF$nOmto{Fg 
zC!c84w%&q`>YawSMO>ErV7PF2$11_Z%}yqKi=^1!E^aBcHe9%(W5$YQ&O6nra~l`i mHqI((_z;~T_bSiF-ii0Po}6@ngP<+Ya0X9TKbLh*2~7aS$1PF- literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x35.png b/Blastproof/fontgen/chars/0x35.png new file mode 100644 index 0000000000000000000000000000000000000000..6ce52a5a7afeadc3dcad79315569265744c11134 GIT binary patch literal 147 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sVGku$B>FSS0`EX9Z=wLe!S}Y z{!P{jtyj7Pdn$VE`8yRP<5WEDxZPi^RZPE?Wi^L6fWP#v*;SsXdnG;D_otkExUKjU wi^B@n&7L*9JFFVdQ&MBb@0N$B2Y5)KL literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x36.png b/Blastproof/fontgen/chars/0x36.png new file mode 100644 index 0000000000000000000000000000000000000000..e54a76a0a0c4cc1c3d0ca2a1966a7bef05c63895 GIT binary patch literal 151 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sW?v;$B>FSXD1%yYcSw&4uACj zf03=g!cB(4j}P7xlgmBnP!z*t;9|I`FF>|6;PC`oq3t{RPV7is?)ouu<1|0jbDRt+ z(du_^9^Kn|s`OV&dU^e$;Mv(?N^|XW|H;2#$@J4wUa7lxGSDUlPgg&ebxsLQ01MAH ARR910 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x37.png b/Blastproof/fontgen/chars/0x37.png new file mode 100644 index 0000000000000000000000000000000000000000..9540f2abdfe55a36fee1095604982ee66d801388 GIT binary patch literal 146 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sYp*3$B>FSXD1o*F(_~_xBmFA zf4#9qLnI0`#~9Sm!>F=Y5hFSSNpyB7!*02qu)qG) iQ48IwwB@aB)1+?PvlLXlc8CvXFoUP7pUXO@geCxLnJ%XQ literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x39.png b/Blastproof/fontgen/chars/0x39.png new file mode 100644 index 0000000000000000000000000000000000000000..f2f1f08f3be0a7fc9cdcd1a634458cbab70f29d2 GIT binary patch literal 149 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sTfZe$B>FSS0@_s9WdZv_MP&- z{^u@6MW(IEdo3*rIG9yDjg04hy7Wru=S2qdf2QgUrRMwJUgCCLksvFk(cIOPdG0Nr yjsG8xds{P|Q!n2YHF|MGWc&Q-^JmlvR5AS6sw)%weby47Jq(_%elF{r5}E)9DKx?W literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x3A.png b/Blastproof/fontgen/chars/0x3A.png new file mode 100644 index 
0000000000000000000000000000000000000000..cbb368925bbcf3a369a041b909c5607a8e6c49b1 GIT binary patch literal 96 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DRoa5$B>FS$$$R;w`W#7;1Y1O rAn3>m5b)^p>|vIk;e73(Nf--*X&FS$$$R;w`W#7;1Y1O zAn3>m5b)^p>@nf?`E<8s5yxeoIckYjsSV8yHLeUhRCqLleHt8rx*0rO{an^LB{Ts5 DECV6S literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x3C.png b/Blastproof/fontgen/chars/0x3C.png new file mode 100644 index 0000000000000000000000000000000000000000..c1c4fd8bc05005fee98cf4ecbade6c4683263285 GIT binary patch literal 137 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sQ^zG$B>FSS0^0gV^H95TKx6@ z{y)36DxHpwo;OE9iODO>Gq)&i}}FAbB^ECP%w`j*{&c;K?;ji=V03)Oqyo({a( m8O!;>pz-cZ?Wf1;l}aV6Rg_O|-W35fo59o7&t;ucLK6Ue6f#Z# literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x3D.png b/Blastproof/fontgen/chars/0x3D.png new file mode 100644 index 0000000000000000000000000000000000000000..3298e2442b9a7d8cddc91d3bce51b867611c3baf GIT binary patch literal 97 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DGg5-$B>FS$$$R;w`W#7;1X~& wAnb^P0mr}p|NWFSXD1!xI$*%#^!UX8 z|C@IA1cY3hd#WhA_Lho>N_P15X%_sYp^P(%=QvMhSTpZ=(bg^-A)EUbFIadMZgajD ty#K`H<=O(4_Tt|~mVN&9SpNshU6(c$Ww#xdIDs}Wc)I$ztaD0e0ssXuH68!} literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x3F.png b/Blastproof/fontgen/chars/0x3F.png new file mode 100644 index 0000000000000000000000000000000000000000..9cb9650f8c8a80b5f74a3cdb2373221531145f82 GIT binary patch literal 144 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sc=sh$B>FSS0^6iJD|Yf9R2A3 z|DtjO#k|r}N1N|7xN9~}Vqj3{EKkw?*Rj#VbLNv59v)oc-%SEKo=LsiwDxxGzuo6L tcTSSlkkogaGjCg<(o21-?eC;sFsihvD{a5cv>s>$gQu&X%Q~loCIIHBGFSXM-HM7!)|NFa6)Y zhp%~)iPDP+;(^cSD0xhqd{gkFNXS!$C8?Tho{EpyZ1eXyv(EI7y0wdAgUH67c{9q| ueUBa8P&G^M=OdP8qjy58j}GfsuxF(lygTd3)&t;ucLK6Td3^9uU literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x41.png b/Blastproof/fontgen/chars/0x41.png new file mode 100644 index 0000000000000000000000000000000000000000..17f5cdd1d9e14e1be5aaab59f855246fc12ac99f GIT binary 
patch literal 138 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sX$K`$B>FS$$$R;w`UegNJ&Xa zNN^BNITX@(=D;EjafLv^JEj8cinl#VUtg%{OH&S1+o(`{p^2UG0+aY^Cb1O0Stglz lN?wA>j^-CNnqDsAV9@84Q`;`3!vQp$!PC{xWt~$(69DtQC#C=Z literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x42.png b/Blastproof/fontgen/chars/0x42.png new file mode 100644 index 0000000000000000000000000000000000000000..2bb0a282331db976813b2712cb02ab2645271311 GIT binary patch literal 139 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sUS}m$B>FSXZtO=4jAyTY(Dt^ z|EC=S9E$UAXf8UnVY++6lI7(Mw)fWMJb!rU*&>_luEwKx4o!J>%Hv1JIoU}KiNR-L pE~xfN-hDUcuMg*|B`^0cUsq literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x43.png b/Blastproof/fontgen/chars/0x43.png new file mode 100644 index 0000000000000000000000000000000000000000..5a237a0a183a274e7deb1365e91d7b55ae736181 GIT binary patch literal 143 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sW49$$B>FSS0@_s9WdZv&b{)# z{->3T(iRn2tF1l~dJQtnlT>bAGuFSS0^0gVo>04j{fm~ z{~o?(#f*UMw>&?eO;O-X5SdtF`#R#(xs0_+3@FS$v^)8w`Y#~@c)0k z+8-^223B9;FjpUz&;=TYm<75dBqckGgkxl9U3I=z{v)SRTj7+ITa((vcOMQe=3uA{ Wl@oQ}pfU|;8iS{+pUXO@geCwOwFS$v^)8w`Y#~@c)0k z+8-^223B9;FjpUz&;=TYm<75dBqckGgg05G+-BeY{>7q>i3wT~OZe2i1R1_CNJy>I S%c%gG#Ng@b=d#Wzp$PzA;Ujba literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x47.png b/Blastproof/fontgen/chars/0x47.png new file mode 100644 index 0000000000000000000000000000000000000000..92302df002d6aafe2980289f1ca573f7a1bc8d0d GIT binary patch literal 150 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7saQ`J$B>FSXD1%yYcSw&j(+t2 zf06ABg~hWAJK1-ZzCI-#AjH6+Vi*>8^i&|frlzaiT0YN*5B2!pZ%V0J=Na_-$bl`> zpG>Zvx+ODfPv|G58S7MkE;_jDvEMI|KlaOn-HLT(0uH!y04-whboFyt=akR{0O5}| A`2YX_ literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x48.png b/Blastproof/fontgen/chars/0x48.png new file mode 100644 index 0000000000000000000000000000000000000000..e346ff90c6b7f06533b2d629e48d1bb4a215612b GIT binary patch literal 123 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DJM@C$B>FS$$$R;w^vqHW@bM8 
z|Nno<0LGX(osAp@30(^3j+{z8sOq?>R6|XXU3Ak*pNP-}h3v@*MoDgVx@#A2R$?gl WCaNJ(^zkOpGzL#sKbLh*2~7YJDJE_J literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x49.png b/Blastproof/fontgen/chars/0x49.png new file mode 100644 index 0000000000000000000000000000000000000000..cb34383ce1e692c82510306745cc5c840299a97f GIT binary patch literal 120 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DSJ;B$B>FS$v^)8w`Y#~@c)0k z+8-^22Em&uZf8?oPc=y9&1hJ&L-~}iv!@`#;`P#k Um#Ur}0UE{N>FVdQ&MBb@03F0AGynhq literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x4A.png b/Blastproof/fontgen/chars/0x4A.png new file mode 100644 index 0000000000000000000000000000000000000000..426d1b5fa9960facc6227aff89d3517d69053f7b GIT binary patch literal 135 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DL+pa$B>FSS0^3hV^H95TKNBe zy44m*?R44h$*Q*ILX!f#f>z5q-|;l0vD#dvmp3c+PIA+ikt_OK{Az~E jeqXy+J6QSKD?`{{I;e=n=oI<@O=j?P^>bP0l+XkKsEjHI literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x4B.png b/Blastproof/fontgen/chars/0x4B.png new file mode 100644 index 0000000000000000000000000000000000000000..4c76699ba5f3c138d6f9eb560a9f93bb27db83d7 GIT binary patch literal 145 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sR&OO$B>FSXD=A?9WdZvcG&&@ z|7L52g)DCtM%%invoBDY#GvxfZ@)cS%Vq!utHvGR>Vo v=a!}|d;90X;iGTQITysIK7Ty-Uvu2G2fB)WqOt-&I~Y7&{an^LB{Ts5!GJZ? 
literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x4C.png b/Blastproof/fontgen/chars/0x4C.png new file mode 100644 index 0000000000000000000000000000000000000000..419c4e447d798d51d41b47fdd7003c0031e2b5c8 GIT binary patch literal 108 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DPvC;$B>FS$$$R;w^vqHW@bM8 z|Nno<0tU4hp~YPp>o|lgZ!pf&Xx3IZW#!hS#`~+_5HmxuCcjM5|J|#B+8I1u{an^L HB{Ts5vD_bH literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x4D.png b/Blastproof/fontgen/chars/0x4D.png new file mode 100644 index 0000000000000000000000000000000000000000..6a2e6bb6ab661e01818f296b13347b896813c7d4 GIT binary patch literal 132 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DQ`~~$B>FSdjn2$F(~j{dhx&h zX5Qi)N7k*kk6d^2RAKZuWYH+zd}(V!GF#A!v!$n3E=!$WSQfi~o;d&3omIFSM<+OPF&OeNCIA1Q z{{32BuNW8i?8TC5juTh{LRGTo&*blX)3c=`VA9HyMcl9Y=hwa3#5>FA;gyvBV_f%) qm`i7xbC%z)nDp?%L-*%(EppQnFSPbXUQ9WdZ<&V5(^ ze7d5L>U=L*BbQa?hnW?V9VV&VmYTY@LOJWA!z`J#ECR1TU2U^_p?^W7YU{g&x%Vec kbTf^9bhG5khw24vtk+c}MMc_VfhIF}y85}Sb4q9e05iueZvX%Q literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x50.png b/Blastproof/fontgen/chars/0x50.png new file mode 100644 index 0000000000000000000000000000000000000000..87ddcba61cc26caa6d8038fdf61ad0be1ccf8b58 GIT binary patch literal 133 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DIZT4$B>FSXZsIwF(`02M}PUh z|Bn@mV(PV}8R{3$<}#_M@;gXxs?K}y$mq3>v(Sb=DW((e&P%y1Qf@uNt<3h?rn`a5 h?t5~1#crtfS;cTnT4ncV34Wlt44$rjF6*2UngACzEf@d* literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x51.png b/Blastproof/fontgen/chars/0x51.png new file mode 100644 index 0000000000000000000000000000000000000000..804d5ba09f5d9f402eefdf8b1da5be5c647397ac GIT binary patch literal 146 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sYp*3$B>FSXD52{9WdZv&i(iQ z`OAeuN{w!Nx9+o?sbj{b;<;&U=uT08mspbtYW-nM9d(u47mEMlxfQkIT6x#Tn6L2xu|FK4U$=uIBX8Zn^JMAbQBWx_Np>6vspd}2Ru6{1-oD!M<1?DxH literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x52.png b/Blastproof/fontgen/chars/0x52.png new file mode 100644 index 
0000000000000000000000000000000000000000..e5e1cd0b6621a51a63b88fee520921833c713ca0 GIT binary patch literal 151 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sW?v;$B>FSM<*WSI-tPgJpJMS z|DSFai<`@o&NLvh|92o7V|VYIy1$x_NVT`>Xt@J*JoE#!5K) za_Kbpw?*!bed4ri=IMjI*0Xlxq;A}|_Nn^|M%ix$l8X~$z5s1v@O1TaS?83{1OTM* BIWGVJ literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x53.png b/Blastproof/fontgen/chars/0x53.png new file mode 100644 index 0000000000000000000000000000000000000000..dcd1aa47fae7e0d7c2ef4482ccd0ce5ea7272403 GIT binary patch literal 150 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7saQ`J$B>FSS0_3O9Z=w5I{5Pc zejok0GiFWLea}GR#>7bs3<{n9J1-XX)p&X;KFTcfh$^j|>hHU8Zp_-dCoRwUx-V>7 z*Prn+=9;*BQHJfsRLhI5FI=|&zjyKLTs`BxF-tAQ);(cA1GI?2)78&qol`;+0D4F{ A%>V!Z literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x54.png b/Blastproof/fontgen/chars/0x54.png new file mode 100644 index 0000000000000000000000000000000000000000..aa67174ba66eb237a2e7b31694708ab12c33a4a3 GIT binary patch literal 113 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DGN^*$B>FS$v^)8w`Y#~@c)0k z+8-^22Em&uZfR8lhmv-s0L@_V MboFyt=akR{0J@|iK>z>% literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x55.png b/Blastproof/fontgen/chars/0x55.png new file mode 100644 index 0000000000000000000000000000000000000000..7a2edeeba248e307b1c9a7c2b85039fa1e5ac651 GIT binary patch literal 124 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DQ8a?$B>FSZzmq)Vo>05D*gZe zd>O|QgGGIg^UP}74K+Je7(8dB?k`xtf7j{n{?l8JImuMK5S+Y0CNyfN(dqlL|0J$3 YoZBuZ%e2wo185wBr>mdKI;Vst0E3DtF8}}l literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x56.png b/Blastproof/fontgen/chars/0x56.png new file mode 100644 index 0000000000000000000000000000000000000000..d2fe7cd73e77c7a08fe3c4e2686ddbdcbc2fbd94 GIT binary patch literal 137 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sQ^zG$B>FSXD0-59WdZ<7QXp+ z{hV5nBdn6TTQ2-@b*}O7RAKZu6mH4ylVNCbDq33CH*1ZbK`7tj?yB9aw@!VTVJ?+# lv?go$xsT89X8W4A#c8aUQ&`w}E*1a) literal 0 HcmV?d00001 diff --git 
a/Blastproof/fontgen/chars/0x57.png b/Blastproof/fontgen/chars/0x57.png new file mode 100644 index 0000000000000000000000000000000000000000..7bf14dfbf0fdeed7afa5d0d7bdddb90d96fdabcf GIT binary patch literal 137 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sQ^zG$B>FSXD=G^9Z=wL4xIh_ z{?BrUu8b8f4NGr2oKtRKygi9wulBduT%{tL?)vYXe8?lh^Vg1_GAD!#w!h_FSS0^6iJD|Yf?7jK_ z|EKRZv?Lzq39nv!YH^ZB7YDZ!cNo65FGFoa8A`_g9|q1)Mb zOnZaoefNUN=4V@{@HBtq4^CVxdgR$-n;(_a^EP@LD0rKfhyiV4@O1TaS?83{1ONgH BIQako literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x59.png b/Blastproof/fontgen/chars/0x59.png new file mode 100644 index 0000000000000000000000000000000000000000..22366a1d04d8973ac2a0de7097e49385d405f95c GIT binary patch literal 141 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sSr;W$B>FSXD1%yI$*%#6#DJ| z{x^KhI#Ug{IVavr`l=dmOO5k@S=iL|8;%7kFD|iN8GV&+RaNp428(0K&fj-6d=0Ev rs2cKiL7nFSS0`HYF(~jjt^B^< zn9uhW{~ei>x`!7Vo^{M%4h#X7F_Nb6Mw<&;$T$tSFS$$$R;w`T?eWo2b% ztp;yHLjlGJgVqOZ2N-vG&d}=EnwO$^s7L0FB44FSXD1r+9Z=w5PX6~l zydl3TK9YXr6PYG4C^e;M1& literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x5D.png b/Blastproof/fontgen/chars/0x5D.png new file mode 100644 index 0000000000000000000000000000000000000000..2ee2d1140ff537a385484f23a15d991a345d834a GIT binary patch literal 120 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DSJ;B$B>FS$$$R;w`T?eWo2b% ztp;yHLjlGJgRhBFokvx6WYu&T9P9{gk({AZ@x)_upP;eO36o3%EhdJU68svB8CLs% PMlpE0`njxgN@xNAZk-=a literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x5E.png b/Blastproof/fontgen/chars/0x5E.png new file mode 100644 index 0000000000000000000000000000000000000000..848cc8ece2eaceef9c8faca6c3f25b4708916b25 GIT binary patch literal 123 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DJM@C$B>FSZ~G2%F(~jjUH$(* z-I^sk;)3Og$(Pytwq0})a%j2LnJ3h>Xnxm;vfR)W3Wv^QojtnwZ%T>#ORt(2nHdc0 XQl!;8((ggTe~DWM4f#NI2M literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x5F.png b/Blastproof/fontgen/chars/0x5F.png new file mode 100644 index 
0000000000000000000000000000000000000000..da8de1d4526f0a3c323df7d877541ff5c9513fe2 GIT binary patch literal 94 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DOFDw$B>FS$$$R;w`W#7;1Y0j r#UjPE9gPWEKY${EYKDA=zOXR(s_;lQDOnT(H86O(`njxgN@xNAJi{8g literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x60.png b/Blastproof/fontgen/chars/0x60.png new file mode 100644 index 0000000000000000000000000000000000000000..893ec4e304e2a2a3747439020ace73271bbce56d GIT binary patch literal 118 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DO*n$$B>FS$$$R;w`T?eWo2a` zJ*S&z#)TgOOD#kfN_JjWiFtKl?cr5?v4UaSjg!9e*_rohN!4{R*w>3H9{hQGGSDCf MPgg&ebxsLQ0G|dTZ~y=R literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x61.png b/Blastproof/fontgen/chars/0x61.png new file mode 100644 index 0000000000000000000000000000000000000000..c6a651b264202cc85f964ac7c4ad07364b648d7c GIT binary patch literal 129 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DGyH<$B>FS$$$R;w`W#7;1a+Z zD!TDNWKU1ej7tLI!Umj+gcF69nLkXM{77vLUlX%hla@xA@TQL}hd2|3u35N=rO33b cY^dsD`1wTC#F;0x1!yLNr>mdKI;Vst0Jv-?CIA2c literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x62.png b/Blastproof/fontgen/chars/0x62.png new file mode 100644 index 0000000000000000000000000000000000000000..428a4db5e2dc977750cea64702d625a2559cd82d GIT binary patch literal 145 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sR&OO$B>FSS0^0gJD|Yf-1y`F z{uq9@@QpLxY&cxSRFkomxuD#bIiYot)ep9;J3PhSr&B{%{67`XNYWEhd_CdvlIenT vj(zn#Q+W5AtnAgZKZK7w4Zd}G{sO+G`_$zZv@tyd+QH!I>gTe~DWM4fBV{(1 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x63.png b/Blastproof/fontgen/chars/0x63.png new file mode 100644 index 0000000000000000000000000000000000000000..677683f1062b65c160b0cff1c6ad3e3c0f77b45e GIT binary patch literal 145 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sR&OO$B>FSXD3;6F(~jj`+ocv z-q#%9{9$v+hG1#=A39Ab)uA?>K`YYP51b1!_iJfWnAyB9I&B*lI{ZSVDNPHb6Mw<&;$UiGc>&b literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x64.png b/Blastproof/fontgen/chars/0x64.png new file 
mode 100644 index 0000000000000000000000000000000000000000..e8bcd24fd0e5f8ef4c76014b094509ae764e9448 GIT binary patch literal 145 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sR&OO$B>FSXD1r+9WdZvHazuz z|C_v(ubCpPu0&sG>|>a8k;TDf!kNAcHSD^^Hv+HgaIV&=eV_Mw=iHKgf4#NiEY4;s vUOlzx)#*tx=f3Z_JNq)%Geh=|`<=z-G^oqk8t0|}?O^b9^>bP0l+XkKMkF>< literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x65.png b/Blastproof/fontgen/chars/0x65.png new file mode 100644 index 0000000000000000000000000000000000000000..aa846243895ad632342fab3c53bde54105ae767e GIT binary patch literal 144 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sc=sh$B>FSXD1!xI$*%z{5|FW z|I;rMCs-|<=E(nfiD8D4(#u<}Ig;+4+l?1|(J>F};ZZv2S~lz8%9VGNrq5M7?p}WO tFS$$$R;w`bN$NJ(kg zthh;M$C8PR++l$xk7pY69xP!HmXZ%<6?2VXRCdwUQ+H1BG~~>a_?aiw(FSdnexJI$*%#l>YMn z|4%oa7Z$$cJr*&4)x-sE2TH4+uJ{oW^4otBf5S0`)KwlnrEffAMdk&G^VP3YaM66g z%=G)StN-G2Gp$nZOHDVKm%1-=vftTLS;4dQE?ZVIFZyJlbbo4H5706OPgg&ebxsLQ E0N)im9{>OV literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x68.png b/Blastproof/fontgen/chars/0x68.png new file mode 100644 index 0000000000000000000000000000000000000000..6412356a1859a8220efee99832f092f31c8d65db GIT binary patch literal 132 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DQ`~~$B>FSdnb5uF(~jbOMmvbu)Vv!{pWEN% h)~tW_cGvlZY|B@wb6PoNssW8<@O1TaS?83{1ORd$EG7T| literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x69.png b/Blastproof/fontgen/chars/0x69.png new file mode 100644 index 0000000000000000000000000000000000000000..35838ead92f85b102dcbc5a757f7e9736a3fc270 GIT binary patch literal 101 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DIHH2$B>FS$$$R;w`W#7;1a;< xD{5#20+W)3l+V_<@0-oGSm{>l<%9qhhPKa~n&*7CMgsLQc)I$ztaD0e0sx+h8}k4F literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x6A.png b/Blastproof/fontgen/chars/0x6A.png new file mode 100644 index 0000000000000000000000000000000000000000..b73924385e894d5c4da94beff8a8d33837bb2317 GIT binary patch literal 126 
zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DOXPy$B>FS$$$R;w`W#7;1a+p zD`IE_0+Z5&l&{ttZ+aFSS1%sqVo=~>lZpN1g>a)S~yukkiml~;(JwFZEe5Dx&^vHJ6(mM%ob}IZF-btZEC@?{anxL kbLn0OZ~pzGciB9H>Ar)Uy4Hn=MxfCQp00i_>zopr0FhKKPXGV_ literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x6C.png b/Blastproof/fontgen/chars/0x6C.png new file mode 100644 index 0000000000000000000000000000000000000000..1c716b83ec3d8a210b5ddee94f44100709bbbd94 GIT binary patch literal 97 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DGg5-$B>FS$$$R;w`UegNJ&Xa vNN^BNNm5|y(URKLC2>({&arD3BLx{Qo#s^3+o&)PsE5JR)z4*}Q$iB}M9doB literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x6D.png b/Blastproof/fontgen/chars/0x6D.png new file mode 100644 index 0000000000000000000000000000000000000000..fc56a8550ec0b471215eec2a2e6535d002dd9496 GIT binary patch literal 115 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DJxGG$B>FS$$$R;w`W#7;1a+Z zDr%Upi!<`ce=s<{h$EPvdl_R-PtS>*U%&GVw31|6RyI_<FSXD1nQF&JmZC zy`?M*dvt-|%5KR+vK>roW`#Ok`LFW+=>0HE-~%PL-8-3 Z?1_&h6#5RtuLGLL;OXk;vd$@?2>>~tC{X|a literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x6F.png b/Blastproof/fontgen/chars/0x6F.png new file mode 100644 index 0000000000000000000000000000000000000000..e716bfdfe2b708fe64b62ff896451c8abfd73ea2 GIT binary patch literal 137 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sQ^zG$B>FSXD1xwVo>04p8e(j z{ylt+e1>~muB^PNJdgS0McM4m)fc`FSXD2vv9WdZv3IF&% z{+^7}+K5gTuM=}pBp*pBcJk%+#;YFkPnzWj#ytrsP`O&FdSsL01WIOz#ApTX1B&t;ucLK6VSo-;B4 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x71.png b/Blastproof/fontgen/chars/0x71.png new file mode 100644 index 0000000000000000000000000000000000000000..19ec5bbdba790fbd8af54e6d598221348f3471a1 GIT binary patch literal 140 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sbEhR$B>FSXD2%HH5hQPw0`)n z@6WHc$uY=ABH2IHsy#LPfT65>Z}`$3d1=e|4|p=<_c}SH-wj!MCS=!zkIh`ae7s(7 pI`HrJrscbrt3>@W`Eh?K>)lN%${pEn`GLkWc)I$ztaD0e0st>6G9>^2 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x72.png 
b/Blastproof/fontgen/chars/0x72.png new file mode 100644 index 0000000000000000000000000000000000000000..5b31d57cad8bd6a19bcd5a1d4fcb33243284430d GIT binary patch literal 125 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DHl%{$B>FSXD1nQF&JtxGyAI4;z`FRrKCAHe6ZE*J+zhYM9-H!66x1}3)@Wl@=*MY ar*!I438kZ{=O+QpWAJqKb6Mw<&;$Vfm@Di6 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x73.png b/Blastproof/fontgen/chars/0x73.png new file mode 100644 index 0000000000000000000000000000000000000000..19fb8fef125bae19c8e097e1cd5ca0c52de2c229 GIT binary patch literal 138 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sX$K`$B>FS$$$R;w`W#7;1a+Z zD!TDNWKU1ej7tLI#t#DFS$$$R;w`T?eWo2a` zJ*S&zhCyq_$_~Lt939OU81B?AnPIY`#(kR7n#6-0Z)AN$1tQga6q(cnUoTT!%)zkI WPfqlv!;dVWkqn-$elF{r5}E+&FSXM;|2F(`1{UiAO} z(^Affq7{OtZr`c#_FR!OrK?QPbCSCFKX%*F?wkWW>FVdQ&MBb@04icCZvX%Q literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x76.png b/Blastproof/fontgen/chars/0x76.png new file mode 100644 index 0000000000000000000000000000000000000000..e422c0c60a2b769cfab672c3317ba9d20d66ee4c GIT binary patch literal 138 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sX$K`$B>FSXD1!xVo>04Hh=lQ z{w7~zr>APkiP?E?X1atPUHRliMV^vp(bb%|BdrGaxf;%jwuRKsRb+~)$vMSzp=rXs nq(@FO^4E(!U%vlA?@DGlBMF^LYG1bl4QKFl^>bP0l+XkKitjQ8 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x77.png b/Blastproof/fontgen/chars/0x77.png new file mode 100644 index 0000000000000000000000000000000000000000..3f77cd1593d9b63dc4184f50f15a6dbbe7416014 GIT binary patch literal 134 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DPK<)$B>FSXM-I17!){|{{K(6 z+OqS~wAH02W;ZSH^KudmGnzNqW73%!*Vn(9wyNFBF~NuTz_WR>v)=5nUJ;k8uDSd6 ji@h=&4EBGU%$Km8=aSHU)!NkqG?>BD)z4*}Q$iB}nb|Ka literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x78.png b/Blastproof/fontgen/chars/0x78.png new file mode 100644 index 0000000000000000000000000000000000000000..eb23df8e16bf1cb97ecc45e7cc3daf90a3fb5c8d GIT binary patch literal 136 
zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DSuBF$B>FSXD1nQF(~jb@BR1x ze5pXwtc6^BWtW~zSmKekEo+g=JZFXn^S;bj+8YrUw+z>5%IFxOEFS_NjiI l@-TMK`@jObzpVKS_!dfQX}&LOoB}kO!PC{xWt~$(69Ax#E~@|l literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x79.png b/Blastproof/fontgen/chars/0x79.png new file mode 100644 index 0000000000000000000000000000000000000000..648ab2a6db1d650ec2d3b13148f46bf39cc6e4a1 GIT binary patch literal 151 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sW?v;$B>FSYbP4=9WdZv-gxzY z{Y<{8N=$q_n$ZX5Y+x?mzS2UgPMnb;Abs`u8(v+#S0w(p-S*A44wN)kId*1V)tWN( z#E+GNX^Xf5O$&K8E1%S{NHTi)#a_lRcE-jyk0uo@m)SFa0c~RNboFyt=akR{0M6kt A+yDRo literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x7A.png b/Blastproof/fontgen/chars/0x7A.png new file mode 100644 index 0000000000000000000000000000000000000000..1a411749f2ddd795b7033c2e96aa09b993d4af2e GIT binary patch literal 127 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DK}3S$B>FSS1%ayF(_~_JG}b; zKgON4d&UbT<(*U5CTz)y(wwKHZ9UP(tI3DS;J%=zBgdAmjwJ;e!pylY`@TMSFSXD3*5F&J<-`@XMF zw-#|(lv$V^CjXP8e7h1un%MHC9c$REPI2-CUO4ROw3XMnYsT@d{MmCv%;Uw=Oq{Gz j*EMDt$GGi%T-R>aFCwjY#@K-uXf%VTtDnm{r-UW|T{S35 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x7C.png b/Blastproof/fontgen/chars/0x7C.png new file mode 100644 index 0000000000000000000000000000000000000000..c6fde3c4694c86897feb641cfd3b83fedd16638f GIT binary patch literal 91 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DJ4%A$B>FS$$$R;w`UegNJ&Xa oNN^BNNm5|y(UJ-^u@GfsaFSFVdQ&MBb@08wET6aWAK literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x7D.png b/Blastproof/fontgen/chars/0x7D.png new file mode 100644 index 0000000000000000000000000000000000000000..f55e4e57c2c135ad63870b98e5fa033afb7acb68 GIT binary patch literal 140 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7sbEhR$B>FSS0_4h9WdZI{N&&N z^EVe7bcwI;nRhMP;Y8+|RZI+?2W;2n->|vJv9<4*$elF{r5}E)OdN%w3 literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0x7E.png b/Blastproof/fontgen/chars/0x7E.png new file mode 100644 index 
0000000000000000000000000000000000000000..e0246f1ce536f6360b72ff3e1eb756b0a0ff514f GIT binary patch literal 126 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DOXPy$B>FSZzmY?F(_~xv3dM| ze+;{@vp`D8N4IsFjErl&^gE|dQ1LX+xNa!EkKsmff<@KLIsVH}3a#c{x4Angxaw|u ZmrqZbqLhDDUJKAb22WQ%mvv4FO#mhZCkp@o literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/chars/0xFF.png b/Blastproof/fontgen/chars/0xFF.png new file mode 100644 index 0000000000000000000000000000000000000000..489945b06ee81debee45c7930638db4d7373cd88 GIT binary patch literal 330 zcmeAS@N?(olHy`uVBq!ia0vp^JU}eM!2~4DCV9I7DYhhUcNd2LAh=-f^2tCE_7YED zSN50OlA;E3Gkz_83KRZXIu34b-3kx1l66H?_DVF}DC+kA;=70mPD=BiEIIdKf%i L{an^LB{Ts5?WAgi literal 0 HcmV?d00001 diff --git a/Blastproof/fontgen/fontgen.cpp b/Blastproof/fontgen/fontgen.cpp new file mode 100644 index 0000000..b19d709 --- /dev/null +++ b/Blastproof/fontgen/fontgen.cpp @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: MPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" +using namespace std; +namespace fs=filesystem; +struct color { + uint8_t r,g,b; +}; +struct pixel { + bool shade_1; + bool check_1; + bool shade_2; + bool check_2; + bool shade_3; + bool check_3; + bool shade_4; + bool enabled; +}; +struct character { + uint32_t codepoint; + vector data; +}; +struct fbm_header { + uint8_t sig[3]={'F','B','M'}; + uint8_t encoding; + uint32_t charnum; + uint8_t width; + uint8_t height; +}; +uint32_t charname_to_uint32(string charname) { + uint32_t value=0; + stringstream ss; + ss<>value; + int shift=(8-charname.size())*4; + value<<=shift; + return value; +} +int get_shade_from_color(const color &c,const color gradient[16]) { + for (int i=0;i<16;++i) { + if (gradient[i].r==c.r && gradient[i].g==c.g && gradient[i].b==c.b) { + return i; + } + } + return -1; +} +uint8_t pixel_to_byte(pixel p) { + uint8_t out=0x00; + out|=(uint8_t)p.shade_1<<7; + out|=(uint8_t)p.check_1<<6; + 
out|=(uint8_t)p.shade_2<<5; + out|=(uint8_t)p.check_2<<4; + out|=(uint8_t)p.shade_3<<3; + out|=(uint8_t)p.check_3<<2; + out|=(uint8_t)p.shade_4<<1; + out|=(uint8_t)p.enabled; + return out; +} +map> chars_data; +vector known_encoding={"ascii","utf8","utf16"}; +int main(int argc,char **argv) { + if (argc!=5) { + cout<<"[Fontgen] Error: wrong amount of arguments."<(bg))) { + cout<<"[Fontgen] Error: invalid background color."<(ft))) { + cout<<"[Fontgen] Error: invalid font color."<(std::stoi(bg_color.substr(1,2),nullptr,16)); + start.g=static_cast(std::stoi(bg_color.substr(3,2),nullptr,16)); + start.b=static_cast(std::stoi(bg_color.substr(5,2),nullptr,16)); + end.r=static_cast(std::stoi(ft_color.substr(1,2),nullptr,16)); + end.g=static_cast(std::stoi(ft_color.substr(3,2),nullptr,16)); + end.b=static_cast(std::stoi(ft_color.substr(5,2),nullptr,16)); + color gradient[16]; + for (int i=0;i<16;++i) { + gradient[i].r=start.r+i*(end.r-start.r)/15.0f; + gradient[i].g=start.g+i*(end.g-start.g)/15.0f; + gradient[i].b=start.b+i*(end.b-start.b)/15.0f; + } + string encoding=string(argv[3]); + if (find(known_encoding.begin(),known_encoding.end(),encoding)==known_encoding.end()) { + cout<<"[Fontgen] Error: encoding not supported."<8) { + cout<<"[Fontgen] Warning: "< pixels(data,data+w*h*channel); + stbi_image_free(data); + chars_data[key]=std::move(pixels); + i++; + } + vector characters; + for (auto it:chars_data) { + character chara; + chara.codepoint=it.first; + uint8_t byte1=(chara.codepoint>>24) & 0xFF; + uint8_t byte2=(chara.codepoint>>16) & 0xFF; + uint8_t byte3=(chara.codepoint>>8) & 0xFF; + bool pcheck_1=(byte1>>1) & 1; + bool pcheck_2=(byte2>>3) & 1; + bool pcheck_3=(byte3>>5) & 1; + for (int y=0;y15 || i<0) { + cout<<"[Fontgen] Error: found a color that isn't in shade map. 
Codepoint that caused the error: "<>3)&1; + p.shade_2=(shade>>2)&1; + p.shade_3=(shade>>1)&1; + p.shade_4=(shade>>0)&1; + if (shade!=0) { + p.enabled=true; + } else { + p.enabled=false; + } + p.check_1=pcheck_1; + p.check_2=pcheck_2; + p.check_3=pcheck_3; + uint8_t byte=pixel_to_byte(p); + chara.data.push_back(byte); + } + } + characters.push_back(chara); + } + fbm_header header; + header.charnum=(uint32_t)characters.size(); + if (width>255 || width<1) { + cout<<"[Fontgen] Error: width isn't between 1 and 255."<255 || height<1) { + cout<<"[Fontgen] Error: height isn't between 1 and 255."< fbm_font; + size_t charsize=4+header.width*header.height; + fbm_font.resize(10+header.charnum*charsize); + memcpy(fbm_font.data(),header.sig,3); + memcpy(fbm_font.data()+3,&header.encoding,1); + memcpy(fbm_font.data()+4,&header.charnum,4); + memcpy(fbm_font.data()+8,&header.width,1); + memcpy(fbm_font.data()+9,&header.height,1); + for (int i=0;i(fbm_font.data()),fbm_font.size()); + fileout.close(); + cout<<"[Fontgen] Successfully generated font.fbm ("< +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +#include +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef STBIDEF +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF 
float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, 
int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + +#ifndef STBI_NO_THREAD_LOCALS + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) 
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define STBI_THREAD_LOCAL __thread + #endif + #endif +#endif + +#if defined(_MSC_VER) || defined(__SYMBIAN32__) +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (-(y) & 31))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." 
+#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. 
+#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#endif + +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif + +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +#ifdef _MSC_VER +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name +#else +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +#ifndef STBI_MAX_DIMENSIONS +#define STBI_MAX_DIMENSIONS (1 << 24) +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + int callback_already_read; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) 
+{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + int ch; + fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, 
int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__pnm_is16(stbi__context *s); +#endif + +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char 
*stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
+static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) || !defined(STBI_NO_PNM) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// returns 1 if the sum of two signed ints is valid (between -2^31 
and 2^31-1 inclusive), 0 on overflow. +static int stbi__addints_valid(int a, int b) +{ + if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; +} + +// returns 1 if the product of two ints fits in a signed short, 0 on overflow. +static int stbi__mul2shorts_valid(int a, int b) +{ + if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid + if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF 
void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return 
stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? 
bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = 
_wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us 
*stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of 
file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && 
defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + 
STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp 
components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define 
stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // 
weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) { + for (j=0; j < count[i]; ++j) { + h->size[k++] = (stbi_uc) (i+1); + if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); + } + } + h->size[k] = 0; + + // compute 
actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if(c < 0 || c >= 256) // symbol id out of bounds! 
+ return -1; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + + sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & (sgn - 1)); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = 
rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + data[0] = (short) (dc * (1 << j->succ_low)); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += 
(r >> 4) & 15; // run + s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * (1 << shift)); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & 
bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 
4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". 
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + 
#define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = 
{'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if 
(z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt 
JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // check that plane subsampling factors are integer ratios; our resamplers can't deal with fractional ratios + // and I've never seen a non-corrupted JPEG file actually use them + for (i=0; i < s->img_n; ++i) { + if (h_max % z->img_comp[i].h != 0) return stbi__err("bad H","Corrupt JPEG"); + if (v_max % z->img_comp[i].v != 0) return stbi__err("bad V","Corrupt JPEG"); + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +static stbi_uc stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) +{ + // some JPEGs have junk at end, skip over it but if we find what looks + // like a valid marker, resume there + while (!stbi__at_eof(j->s)) { + stbi_uc x = stbi__get8(j->s); + while (x == 0xff) { // might be a marker + if (stbi__at_eof(j->s)) return STBI__MARKER_none; + x = stbi__get8(j->s); + if (x != 0x00 && x != 0xff) { + // not a stuffed zero or lead-in to another marker, looks + // like an actual marker, return it + return x; + } + // stuffed zero has x=0 now which ends the loop, meaning we go + // back to regular scan loop. + // repeated 0xff keeps trying to read the next byte of the marker. 
+ } + } + return STBI__MARKER_none; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + j->marker = stbi__skip_jpeg_junk_at_end(j); + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + m = stbi__get_marker(j); + if (STBI__RESTART(m)) + m = stbi__get_marker(j); + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + m = stbi__get_marker(j); + } else { + if (!stbi__process_marker(j, m)) return 1; + m = stbi__get_marker(j); + } + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, 
stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. 
+ for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. 
+ __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 
0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = 
_mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = 
vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int 
ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && 
r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + 
out += n; + } + } else { // YCbCr + alpha? Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__errpuc("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) +#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[STBI__ZNSYMS]; + stbi__uint16 value[STBI__ZNSYMS]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + int hit_zeof_once; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static 
stbi_uc stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + if (!a->hit_zeof_once) { + // This is the first time we hit eof, insert 16 extra padding btis + // to allow us to keep going; if we actually consume any of them + // though, that is invalid data. This is caught later. + a->hit_zeof_once = 1; + a->num_bits += 16; // add 16 implicit zero bits + } else { + // We already inserted our extra 16 padding bits and are again + // out, this stream is actually prematurely terminated. 
+ return -1; + } + } else { + stbi__fill_bits(a); + } + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int 
len,dist; + if (z == 256) { + a->zout = zout; + if (a->hit_zeof_once && a->num_bits < 16) { + // The first time we hit zeof, we inserted 16 extra zero bits into our bit + // buffer so the decoder can just do its speculative decoding. But if we + // actually consumed any of those bits (which is the case when num_bits < 16), + // the stream actually read past the end so it is malformed. + return stbi__err("unexpected end","Corrupt PNG"); + } + return 1; + } + if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (len > a->zout_end - zout) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = 
(stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + a->hit_zeof_once = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + 
+static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if 
(outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filter used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_sub // Paeth with b=c=0 turns out to be equivalent to sub +}; + +static int stbi__paeth(int a, int b, int c) +{ + // This formulation looks very different from the reference in the PNG spec, but is + // actually 
equivalent and has favorable data dependencies and admits straightforward + // generation of branch-free code, which helps performance significantly. + int thresh = c*3 - (a + b); + int lo = a < b ? a : b; + int hi = a < b ? b : a; + int t0 = (hi <= thresh) ? lo : c; + int t1 = (thresh <= lo) ? hi : t0; + return t1; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// adds an extra all-255 alpha channel +// dest == src is legal +// img_n must be 1 or 3 +static void stbi__create_png_alpha_expand8(stbi_uc *dest, stbi_uc *src, stbi__uint32 x, int img_n) +{ + int i; + // must process data backwards since we allow dest==src + if (img_n == 1) { + for (i=x-1; i >= 0; --i) { + dest[i*2+1] = 255; + dest[i*2+0] = src[i]; + } + } else { + STBI_ASSERT(img_n == 3); + for (i=x-1; i >= 0; --i) { + dest[i*4+3] = 255; + dest[i*4+2] = src[i*3+2]; + dest[i*4+1] = src[i*3+1]; + dest[i*4+0] = src[i*3+0]; + } + } +} + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16 ? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + stbi_uc *filter_buf; + int all_ok = 1; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + // note: error exits here don't need to clean up a->out individually, + // stbi__do_png always does on error. 
+ if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + if (!stbi__mad2sizes_valid(img_width_bytes, y, img_width_bytes)) return stbi__err("too large", "Corrupt PNG"); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + // Allocate two scan lines worth of filter workspace buffer. + filter_buf = (stbi_uc *) stbi__malloc_mad2(img_width_bytes, 2, 0); + if (!filter_buf) return stbi__err("outofmem", "Out of memory"); + + // Filtering for low-bit-depth images + if (depth < 8) { + filter_bytes = 1; + width = img_width_bytes; + } + + for (j=0; j < y; ++j) { + // cur/prior filter buffers alternate + stbi_uc *cur = filter_buf + (j & 1)*img_width_bytes; + stbi_uc *prior = filter_buf + (~j & 1)*img_width_bytes; + stbi_uc *dest = a->out + stride*j; + int nk = width * filter_bytes; + int filter = *raw++; + + // check filter type + if (filter > 4) { + all_ok = stbi__err("invalid filter","Corrupt PNG"); + break; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // perform actual filtering + switch (filter) { + case STBI__F_none: + memcpy(cur, raw, nk); + break; + case STBI__F_sub: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); + break; + case STBI__F_up: + for (k = 0; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); + break; + case STBI__F_avg: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + 
cur[k-filter_bytes])>>1)); + break; + case STBI__F_paeth: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); // prior[k] == stbi__paeth(0,prior[k],0) + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes], prior[k], prior[k-filter_bytes])); + break; + case STBI__F_avg_first: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); + break; + } + + raw += nk; + + // expand decoded bits in cur to dest, also adding an extra alpha channel if desired + if (depth < 8) { + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + stbi_uc *in = cur; + stbi_uc *out = dest; + stbi_uc inb = 0; + stbi__uint32 nsmp = x*img_n; + + // expand bits to bytes first + if (depth == 4) { + for (i=0; i < nsmp; ++i) { + if ((i & 1) == 0) inb = *in++; + *out++ = scale * (inb >> 4); + inb <<= 4; + } + } else if (depth == 2) { + for (i=0; i < nsmp; ++i) { + if ((i & 3) == 0) inb = *in++; + *out++ = scale * (inb >> 6); + inb <<= 2; + } + } else { + STBI_ASSERT(depth == 1); + for (i=0; i < nsmp; ++i) { + if ((i & 7) == 0) inb = *in++; + *out++ = scale * (inb >> 7); + inb <<= 1; + } + } + + // insert alpha=255 values if desired + if (img_n != out_n) + stbi__create_png_alpha_expand8(dest, dest, x, img_n); + } else if (depth == 8) { + if (img_n == out_n) + memcpy(dest, cur, x*img_n); + else + stbi__create_png_alpha_expand8(dest, cur, x, img_n); + } else if (depth == 16) { + // convert the image data from big-endian to platform-native + stbi__uint16 *dest16 = (stbi__uint16*)dest; + stbi__uint32 nsmp = x*img_n; + + if (img_n == out_n) { + for (i = 0; i < nsmp; ++i, ++dest16, cur += 2) + *dest16 = (cur[0] << 8) | cur[1]; + } else { + STBI_ASSERT(img_n+1 == out_n); + if (img_n == 1) { + for (i = 0; i < x; ++i, dest16 += 2, cur += 2) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = 
0xffff; + } + } else { + STBI_ASSERT(img_n == 3); + for (i = 0; i < x; ++i, dest16 += 4, cur += 6) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = (cur[2] << 8) | cur[3]; + dest16[2] = (cur[4] << 8) | cur[5]; + dest16[3] = 0xffff; + } + } + } + } + } + + STBI_FREE(filter_buf); + if (!all_ok) return 0; + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute 
color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load_global = 0; +static int stbi__de_iphone_flag_global = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int 
flag_true_if_should_unpremultiply)
{
   stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply;
}

STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
{
   stbi__de_iphone_flag_global = flag_true_if_should_convert;
}

#ifndef STBI_THREAD_LOCAL
#define stbi__unpremultiply_on_load  stbi__unpremultiply_on_load_global
#define stbi__de_iphone_flag  stbi__de_iphone_flag_global
#else
// Thread-local overrides: the *_set flag records whether this thread ever
// called the _thread variant; if so, the local value wins over the global.
static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set;
static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set;

STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply)
{
   stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply;
   stbi__unpremultiply_on_load_set = 1;
}

STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert)
{
   stbi__de_iphone_flag_local = flag_true_if_should_convert;
   stbi__de_iphone_flag_set = 1;
}

#define stbi__unpremultiply_on_load  (stbi__unpremultiply_on_load_set       \
                                       ? stbi__unpremultiply_on_load_local  \
                                       : stbi__unpremultiply_on_load_global)
#define stbi__de_iphone_flag  (stbi__de_iphone_flag_set                     \
                                ? stbi__de_iphone_flag_local                \
                                : stbi__de_iphone_flag_global)
#endif // STBI_THREAD_LOCAL

// Undo the iPhone CgBI byte order in-place: swaps B and R in every pixel of
// z->out; for 4-channel output it can also un-premultiply alpha, rounding
// with "+ a/2" before the divide.
static void stbi__de_iphone(stbi__png *z)
{
   stbi__context *s = z->s;
   stbi__uint32 i, pixel_count = s->img_x * s->img_y;
   stbi_uc *p = z->out;

   if (s->img_out_n == 3) {  // convert bgr to rgb
      for (i=0; i < pixel_count; ++i) {
         stbi_uc t = p[0];
         p[0] = p[2];
         p[2] = t;
         p += 3;
      }
   } else {
      STBI_ASSERT(s->img_out_n == 4);
      if (stbi__unpremultiply_on_load) {
         // convert bgr to rgb and unpremultiply
         for (i=0; i < pixel_count; ++i) {
            stbi_uc a = p[3];
            stbi_uc t = p[0];
            if (a) {
               stbi_uc half = a / 2;
               p[0] = (p[2] * 255 + half) / a;
               p[1] = (p[1] * 255 + half) / a;
               p[2] = ( t   * 255 + half) / a;
            } else {
               p[0] = p[2];
               p[2] = t;
            }
            p += 4;
         }
      } else {
         // convert bgr to rgb
         for (i=0; i < pixel_count; ++i) {
            stbi_uc t = p[0];
            p[0] = p[2];
            p[2] = t;
            p += 4;
         }
      }
   }
}

// Pack a 4-character PNG chunk tag into a 32-bit big-endian id for switch dispatch.
#define STBI__PNG_TYPE(a,b,c,d)  (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d))

// Walk the PNG chunk stream. 'scan' controls how far to go (STBI__SCAN_type /
// STBI__SCAN_header / STBI__SCAN_load); for a full load the decoded pixels end
// up in z->out. Returns 1 on success, 0 (via stbi__err) on failure.
static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
{
   stbi_uc palette[1024], pal_img_n=0;
   stbi_uc has_trans=0, tc[3]={0};
   stbi__uint16 tc16[3];
   stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0;
   int first=1,k,interlace=0, color=0, is_iphone=0;
   stbi__context *s = z->s;

   z->expanded = NULL;
   z->idata = NULL;
   z->out = NULL;

   if (!stbi__check_png_header(s)) return 0;

   if (scan == STBI__SCAN_type) return 1;

   for (;;) {
      stbi__pngchunk c = stbi__get_chunk_header(s);
      switch (c.type) {
         case STBI__PNG_TYPE('C','g','B','I'):
            // Apple's proprietary chunk: remember so we can de-BGR later and
            // skip the zlib header when inflating.
            is_iphone = 1;
            stbi__skip(s, c.length);
            break;
         case STBI__PNG_TYPE('I','H','D','R'): {
            int comp,filter;
            if (!first) return stbi__err("multiple IHDR","Corrupt PNG");
            first = 0;
            if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG");
            s->img_x = stbi__get32be(s);
            s->img_y = stbi__get32be(s);
            if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
            if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)");
            z->depth = stbi__get8(s);  if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16)  return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only");
            color = stbi__get8(s);  if (color > 6) return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG");
            if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG");
            comp  = stbi__get8(s);  if (comp) return stbi__err("bad comp method","Corrupt PNG");
            filter= stbi__get8(s);  if (filter) return stbi__err("bad filter method","Corrupt PNG");
            interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG");
            if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG");
            if (!pal_img_n) {
               s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
               if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");
            } else {
               // if paletted, then pal_n is our final components, and
               // img_n is # components to decompress/filter.
               s->img_n = 1;
               if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG");
            }
            // even with SCAN_header, have to scan to see if we have a tRNS
            break;
         }

         case STBI__PNG_TYPE('P','L','T','E'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG");
            pal_len = c.length / 3;
            if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG");
            for (i=0; i < pal_len; ++i) {
               palette[i*4+0] = stbi__get8(s);
               palette[i*4+1] = stbi__get8(s);
               palette[i*4+2] = stbi__get8(s);
               palette[i*4+3] = 255;    // opaque unless tRNS overrides below
            }
            break;
         }

         case STBI__PNG_TYPE('t','R','N','S'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG");
            if (pal_img_n) {
               if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; }
               if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG");
               if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG");
               pal_img_n = 4;
               for (i=0; i < c.length; ++i)
                  palette[i*4+3] = stbi__get8(s);
            } else {
               if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG");
               if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG");
               has_trans = 1;
               // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now.
               if (scan == STBI__SCAN_header) { ++s->img_n; return 1; }
               if (z->depth == 16) {
                  for (k = 0; k < s->img_n && k < 3; ++k) // extra loop test to suppress false GCC warning
                     tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is
               } else {
                  for (k = 0; k < s->img_n && k < 3; ++k)
                     tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger
               }
            }
            break;
         }

         case STBI__PNG_TYPE('I','D','A','T'): {
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG");
            if (scan == STBI__SCAN_header) {
               // header scan definitely stops at first IDAT
               if (pal_img_n)
                  s->img_n = pal_img_n;
               return 1;
            }
            if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes");
            if ((int)(ioff + c.length) < (int)ioff) return 0;
            if (ioff + c.length > idata_limit) {
               // grow the concatenated-IDAT buffer by doubling
               stbi__uint32 idata_limit_old = idata_limit;
               stbi_uc *p;
               if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
               while (ioff + c.length > idata_limit)
                  idata_limit *= 2;
               STBI_NOTUSED(idata_limit_old);
               p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory");
               z->idata = p;
            }
            if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG");
            ioff += c.length;
            break;
         }

         case STBI__PNG_TYPE('I','E','N','D'): {
            stbi__uint32 raw_len, bpl;
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if (scan != STBI__SCAN_load) return 1;
            if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG");
            // initial guess for decoded data size to avoid unnecessary reallocs
            bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
            raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */;
            z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone);
            if (z->expanded == NULL) return 0; // zlib should set error
            STBI_FREE(z->idata); z->idata = NULL;
            if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
               s->img_out_n = s->img_n+1;
            else
               s->img_out_n = s->img_n;
            if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0;
            if (has_trans) {
               if (z->depth == 16) {
                  if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0;
               } else {
                  if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0;
               }
            }
            if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
               stbi__de_iphone(z);
            if (pal_img_n) {
               // pal_img_n == 3 or 4
               s->img_n = pal_img_n; // record the actual colors we had
               s->img_out_n = pal_img_n;
               if (req_comp >= 3) s->img_out_n = req_comp;
               if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
                  return 0;
            } else if (has_trans) {
               // non-paletted image with tRNS -> source image has (constant) alpha
               ++s->img_n;
            }
            STBI_FREE(z->expanded); z->expanded = NULL;
            // end of PNG chunk, read and skip CRC
            stbi__get32be(s);
            return 1;
         }

         default:
            // if critical, fail
            if (first) return stbi__err("first not IHDR", "Corrupt PNG");
            if ((c.type & (1 << 29)) == 0) {
               #ifndef STBI_NO_FAILURE_STRINGS
               // not threadsafe
               static char invalid_chunk[] = "XXXX PNG chunk not known";
               invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
               invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
               invalid_chunk[2] = STBI__BYTECAST(c.type >>  8);
               invalid_chunk[3] = STBI__BYTECAST(c.type >>  0);
               #endif
               return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
            }
            stbi__skip(s, c.length);
            break;
      }
      // end of PNG chunk, read and skip CRC
      stbi__get32be(s);
   }
}

// Run a full PNG decode and hand ownership of the pixel buffer to the caller,
// converting to req_comp channels (8- or 16-bit per channel) if requested.
// Frees all of p's intermediate buffers on every path.
static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri)
{
   void *result=NULL;
   if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
   if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
      if (p->depth <= 8)
         ri->bits_per_channel = 8;
      else if (p->depth == 16)
         ri->bits_per_channel = 16;
      else
         return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth");
      result = p->out;
      p->out = NULL;
      if (req_comp && req_comp != p->s->img_out_n) {
         if (ri->bits_per_channel == 8)
            result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
         else
            result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
         p->s->img_out_n = req_comp;
         if (result == NULL) return result;
      }
      *x = p->s->img_x;
      *y = p->s->img_y;
      if (n) *n = p->s->img_n;
   }
   STBI_FREE(p->out);      p->out      = NULL;
   STBI_FREE(p->expanded); p->expanded = NULL;
   STBI_FREE(p->idata);    p->idata    = NULL;

   return result;
}

static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int
req_comp, stbi__result_info *ri)
{
   stbi__png p;
   p.s = s;
   return stbi__do_png(&p, x,y,comp,req_comp, ri);
}

// Non-destructive probe: does the stream start with a PNG signature?
static int stbi__png_test(stbi__context *s)
{
   int r;
   r = stbi__check_png_header(s);
   stbi__rewind(s);
   return r;
}

// Header-only scan: fills x/y/comp without decoding pixel data.
static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
{
   if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
      stbi__rewind( p->s );
      return 0;
   }
   if (x) *x = p->s->img_x;
   if (y) *y = p->s->img_y;
   if (comp) *comp = p->s->img_n;
   return 1;
}

static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
{
   stbi__png p;
   p.s = s;
   return stbi__png_info_raw(&p, x, y, comp);
}

// Returns 1 iff the PNG has 16 bits per channel; rewinds on "no".
static int stbi__png_is16(stbi__context *s)
{
   stbi__png p;
   p.s = s;
   if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
      return 0;
   if (p.depth != 16) {
      stbi__rewind(p.s);
      return 0;
   }
   return 1;
}
#endif

// Microsoft/Windows BMP image

#ifndef STBI_NO_BMP
// Probe: "BM" magic followed by a known BITMAPINFOHEADER size.
static int stbi__bmp_test_raw(stbi__context *s)
{
   int r;
   int sz;
   if (stbi__get8(s) != 'B') return 0;
   if (stbi__get8(s) != 'M') return 0;
   stbi__get32le(s); // discard filesize
   stbi__get16le(s); // discard reserved
   stbi__get16le(s); // discard reserved
   stbi__get32le(s); // discard data offset
   sz = stbi__get32le(s);
   r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
   return r;
}

static int stbi__bmp_test(stbi__context *s)
{
   int r = stbi__bmp_test_raw(s);
   stbi__rewind(s);
   return r;
}


// returns 0..31 for the highest set bit
static int stbi__high_bit(unsigned int z)
{
   int n=0;
   if (z == 0) return -1;
   if (z >= 0x10000) { n += 16; z >>= 16; }
   if (z >= 0x00100) { n +=  8; z >>=  8; }
   if (z >= 0x00010) { n +=  4; z >>=  4; }
   if (z >= 0x00004) { n +=  2; z >>=  2; }
   if (z >= 0x00002) { n +=  1;/* >>=  1;*/ }
   return n;
}

// population count via parallel bit summation
static int stbi__bitcount(unsigned int a)
{
   a = (a & 0x55555555) + ((a >>  1) & 0x55555555); // max 2
   a = (a & 0x33333333) + ((a >>  2) & 0x33333333); // max 4
   a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
   a = (a + (a >> 8)); // max 16 per 8 bits
   a = (a + (a >> 16)); // max 32 per 8 bits
   return a & 0xff;
}

// extract an arbitrarily-aligned N-bit value (N=bits)
// from v, and then make it 8-bits long and fractionally
// extend it to full full range.
static int stbi__shiftsigned(unsigned int v, int shift, int bits)
{
   static unsigned int mul_table[9] = {
      0,
      0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/,
      0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/,
   };
   static unsigned int shift_table[9] = {
      0, 0,0,1,0,2,4,6,0,
   };
   if (shift < 0)
      v <<= -shift;
   else
      v >>= shift;
   STBI_ASSERT(v < 256);
   v >>= (8-bits);
   STBI_ASSERT(bits >= 0 && bits <= 8);
   return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits];
}

// Parsed BMP header state shared between header parsing and pixel loading.
typedef struct
{
   int bpp, offset, hsz;                 // bits/pixel, pixel-data offset, header size
   unsigned int mr,mg,mb,ma, all_a;      // channel bitmasks; all_a ORs every alpha seen
   int extra_read;                       // bytes consumed beyond the 14-byte file header
} stbi__bmp_data;

// Install the implied channel masks for uncompressed 16/32-bpp BMPs.
static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress)
{
   // BI_BITFIELDS specifies masks explicitly, don't override
   if (compress == 3)
      return 1;

   if (compress == 0) {
      if (info->bpp == 16) {
         info->mr = 31u << 10;
         info->mg = 31u <<  5;
         info->mb = 31u <<  0;
      } else if (info->bpp == 32) {
         info->mr = 0xffu << 16;
         info->mg = 0xffu <<  8;
         info->mb = 0xffu <<  0;
         info->ma = 0xffu << 24;
         info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
      } else {
         // otherwise, use defaults, which is all-0
         info->mr = info->mg = info->mb = info->ma = 0;
      }
      return 1;
   }
   return 0; // error
}

// Parse the BMP file + info header into *info. Returns (void*)1 on success,
// NULL (via stbi__errpuc) on failure; leaves the stream positioned after the
// header fields it consumed.
static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info)
{
   int hsz;
   if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP");
   stbi__get32le(s); // discard filesize
   stbi__get16le(s); // discard reserved
   stbi__get16le(s); // discard reserved
   info->offset = stbi__get32le(s);
   info->hsz = hsz = stbi__get32le(s);
   info->mr = info->mg = info->mb = info->ma = 0;
   info->extra_read = 14;

   if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP");

   if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
   if (hsz == 12) {
      s->img_x = stbi__get16le(s);
      s->img_y = stbi__get16le(s);
   } else {
      s->img_x = stbi__get32le(s);
      s->img_y = stbi__get32le(s);
   }
   if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP");
   info->bpp = stbi__get16le(s);
   if (hsz != 12) {
      int compress = stbi__get32le(s);
      if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
      if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes
      if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel
      stbi__get32le(s); // discard sizeof
      stbi__get32le(s); // discard hres
      stbi__get32le(s); // discard vres
      stbi__get32le(s); // discard colorsused
      stbi__get32le(s); // discard max important
      if (hsz == 40 || hsz == 56) {
         if (hsz == 56) {
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
            stbi__get32le(s);
         }
         if (info->bpp == 16 || info->bpp == 32) {
            if (compress == 0) {
               stbi__bmp_set_mask_defaults(info, compress);
            } else if (compress == 3) {
               info->mr = stbi__get32le(s);
               info->mg = stbi__get32le(s);
               info->mb = stbi__get32le(s);
               info->extra_read += 12;
               // not documented, but generated by photoshop and handled by mspaint
               if (info->mr == info->mg && info->mg == info->mb) {
                  // ?!?!?
                  return stbi__errpuc("bad BMP", "bad BMP");
               }
            } else
               return stbi__errpuc("bad BMP", "bad BMP");
         }
      } else {
         // V4/V5 header
         int i;
         if (hsz != 108 && hsz != 124)
            return stbi__errpuc("bad BMP", "bad BMP");
         info->mr = stbi__get32le(s);
         info->mg = stbi__get32le(s);
         info->mb = stbi__get32le(s);
         info->ma = stbi__get32le(s);
         if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs
            stbi__bmp_set_mask_defaults(info, compress);
         stbi__get32le(s); // discard color space
         for (i=0; i < 12; ++i)
            stbi__get32le(s); // discard color space parameters
         if (hsz == 124) {
            stbi__get32le(s); // discard rendering intent
            stbi__get32le(s); // discard offset of profile data
            stbi__get32le(s); // discard size of profile data
            stbi__get32le(s); // discard reserved
         }
      }
   }
   return (void *) 1;
}


// Full BMP decode: palette (1/4/8 bpp), raw 16/24/32 bpp, and arbitrary
// BI_BITFIELDS masks. Handles bottom-up rows (positive height) by flipping.
static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   stbi_uc *out;
   unsigned int mr=0,mg=0,mb=0,ma=0, all_a;
   stbi_uc pal[256][4];
   int psize=0,i,j,width;
   int flip_vertically, pad, target;
   stbi__bmp_data info;
   STBI_NOTUSED(ri);

   info.all_a = 255;
   if (stbi__bmp_parse_header(s, &info) == NULL)
      return NULL; // error code already set

   flip_vertically = ((int) s->img_y) > 0;
   s->img_y = abs((int) s->img_y);

   if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
   if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");

   mr = info.mr;
   mg = info.mg;
   mb = info.mb;
   ma = info.ma;
   all_a = info.all_a;

   if (info.hsz == 12) {
      // old-style (COREHEADER) palettes are 3 bytes per entry
      if (info.bpp < 24)
         psize = (info.offset - info.extra_read - 24) / 3;
   } else {
      if (info.bpp < 16)
         psize = (info.offset - info.extra_read - info.hsz) >> 2;
   }
   if (psize == 0) {
      // accept some number of extra bytes after the header, but if the offset points either to before
      // the header ends or implies a large amount of extra data, reject the file as malformed
      int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original);
      int header_limit = 1024; // max we actually read is below 256 bytes currently.
      int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size.
      if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) {
         return stbi__errpuc("bad header", "Corrupt BMP");
      }
      // we established that bytes_read_so_far is positive and sensible.
      // the first half of this test rejects offsets that are either too small positives, or
      // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn
      // ensures the number computed in the second half of the test can't overflow.
      if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) {
         return stbi__errpuc("bad offset", "Corrupt BMP");
      } else {
         stbi__skip(s, info.offset - bytes_read_so_far);
      }
   }

   if (info.bpp == 24 && ma == 0xff000000)
      s->img_n = 3;
   else
      s->img_n = ma ? 4 : 3;
   if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
      target = req_comp;
   else
      target = s->img_n; // if they want monochrome, we'll post-convert

   // sanity-check size
   if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
      return stbi__errpuc("too large", "Corrupt BMP");

   out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
   if (!out) return stbi__errpuc("outofmem", "Out of memory");
   if (info.bpp < 16) {
      int z=0;
      if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); }
      for (i=0; i < psize; ++i) {
         pal[i][2] = stbi__get8(s);
         pal[i][1] = stbi__get8(s);
         pal[i][0] = stbi__get8(s);
         if (info.hsz != 12) stbi__get8(s);
         pal[i][3] = 255;
      }
      stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
      if (info.bpp == 1) width = (s->img_x + 7) >> 3;
      else if (info.bpp == 4) width = (s->img_x + 1) >> 1;
      else if (info.bpp == 8) width = s->img_x;
      else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); }
      pad = (-width)&3; // rows are padded to a 4-byte boundary
      if (info.bpp == 1) {
         for (j=0; j < (int) s->img_y; ++j) {
            int bit_offset = 7, v = stbi__get8(s);
            for (i=0; i < (int) s->img_x; ++i) {
               int color = (v>>bit_offset)&0x1;
               out[z++] = pal[color][0];
               out[z++] = pal[color][1];
               out[z++] = pal[color][2];
               if (target == 4) out[z++] = 255;
               if (i+1 == (int) s->img_x) break;
               if((--bit_offset) < 0) {
                  bit_offset = 7;
                  v = stbi__get8(s);
               }
            }
            stbi__skip(s, pad);
         }
      } else {
         for (j=0; j < (int) s->img_y; ++j) {
            for (i=0; i < (int) s->img_x; i += 2) {
               int v=stbi__get8(s),v2=0;
               if (info.bpp == 4) {
                  v2 = v & 15;
                  v >>= 4;
               }
               out[z++] = pal[v][0];
               out[z++] = pal[v][1];
               out[z++] = pal[v][2];
               if (target == 4) out[z++] = 255;
               if (i+1 == (int) s->img_x) break;
               v = (info.bpp == 8) ? stbi__get8(s) : v2;
               out[z++] = pal[v][0];
               out[z++] = pal[v][1];
               out[z++] = pal[v][2];
               if (target == 4) out[z++] = 255;
            }
            stbi__skip(s, pad);
         }
      }
   } else {
      int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
      int z = 0;
      int easy=0;
      stbi__skip(s, info.offset - info.extra_read - info.hsz);
      if (info.bpp == 24) width = 3 * s->img_x;
      else if (info.bpp == 16) width = 2*s->img_x;
      else /* bpp = 32 and pad = 0 */ width=0;
      pad = (-width) & 3;
      if (info.bpp == 24) {
         easy = 1;
      } else if (info.bpp == 32) {
         if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
            easy = 2;
      }
      if (!easy) {
         if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
         // right shift amt to put high bit in position #7
         rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
         gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
         bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
         ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
         if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
      }
      for (j=0; j < (int) s->img_y; ++j) {
         if (easy) {
            for (i=0; i < (int) s->img_x; ++i) {
               unsigned char a;
               out[z+2] = stbi__get8(s);
               out[z+1] = stbi__get8(s);
               out[z+0] = stbi__get8(s);
               z += 3;
               a = (easy == 2 ? stbi__get8(s) : 255);
               all_a |= a;
               if (target == 4) out[z++] = a;
            }
         } else {
            int bpp = info.bpp;
            for (i=0; i < (int) s->img_x; ++i) {
               stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s));
               unsigned int a;
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
               out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
               a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
               all_a |= a;
               if (target == 4) out[z++] = STBI__BYTECAST(a);
            }
         }
         stbi__skip(s, pad);
      }
   }

   // if alpha channel is all 0s, replace with all 255s
   if (target == 4 && all_a == 0)
      for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4)
         out[i] = 255;

   if (flip_vertically) {
      stbi_uc t;
      for (j=0; j < (int) s->img_y>>1; ++j) {
         stbi_uc *p1 = out +      j     *s->img_x*target;
         stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target;
         for (i=0; i < (int) s->img_x*target; ++i) {
            t = p1[i]; p1[i] = p2[i]; p2[i] = t;
         }
      }
   }

   if (req_comp && req_comp != target) {
      out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
      if (out == NULL) return out; // stbi__convert_format frees input on failure
   }

   *x = s->img_x;
   *y = s->img_y;
   if (comp) *comp = s->img_n;
   return out;
}
#endif

// Targa Truevision - TGA
// by Jonathan Dummer
#ifndef STBI_NO_TGA
// returns STBI_rgb or whatever, 0 on error
static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16)
{
   // only RGB or RGBA (incl.
   // 16bit) or grey allowed
   if (is_rgb16) *is_rgb16 = 0;
   switch(bits_per_pixel) {
      case 8:  return STBI_grey;
      case 16: if(is_grey) return STBI_grey_alpha;
               // fallthrough
      case 15: if(is_rgb16) *is_rgb16 = 1;
               return STBI_rgb;
      case 24: // fallthrough
      case 32: return bits_per_pixel/8;
      default: return 0;
   }
}

// Header-only TGA scan: fills x/y/comp and rewinds+returns 0 on any mismatch.
static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp)
{
    int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
    int sz, tga_colormap_type;
    stbi__get8(s);                   // discard Offset
    tga_colormap_type = stbi__get8(s); // colormap type
    if( tga_colormap_type > 1 ) {
        stbi__rewind(s);
        return 0;      // only RGB or indexed allowed
    }
    tga_image_type = stbi__get8(s); // image type
    if ( tga_colormap_type == 1 ) { // colormapped (paletted) image
        if (tga_image_type != 1 && tga_image_type != 9) {
            stbi__rewind(s);
            return 0;
        }
        stbi__skip(s,4);       // skip index of first colormap entry and number of entries
        sz = stbi__get8(s);    //   check bits per palette color entry
        if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) {
            stbi__rewind(s);
            return 0;
        }
        stbi__skip(s,4);       // skip image x and y origin
        tga_colormap_bpp = sz;
    } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
        if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) {
            stbi__rewind(s);
            return 0; // only RGB or grey allowed, +/- RLE
        }
        stbi__skip(s,9); // skip colormap specification and image x/y origin
        tga_colormap_bpp = 0;
    }
    tga_w = stbi__get16le(s);
    if( tga_w < 1 ) {
        stbi__rewind(s);
        return 0;   // test width
    }
    tga_h = stbi__get16le(s);
    if( tga_h < 1 ) {
        stbi__rewind(s);
        return 0;   // test height
    }
    tga_bits_per_pixel = stbi__get8(s); // bits per pixel
    stbi__get8(s); // ignore alpha bits
    if (tga_colormap_bpp != 0) {
        if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) {
            // when using a colormap, tga_bits_per_pixel is the size of the indexes
            // I don't think anything but 8 or 16bit indexes makes sense
            stbi__rewind(s);
            return 0;
        }
        tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
    } else {
        tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
    }
    if(!tga_comp) {
      stbi__rewind(s);
      return 0;
    }
    if (x) *x = tga_w;
    if (y) *y = tga_h;
    if (comp) *comp = tga_comp;
    return 1;                   // seems to have passed everything
}

static int stbi__tga_test(stbi__context *s)
{
   int res = 0;
   int sz, tga_color_type;
   stbi__get8(s);      //   discard Offset
   tga_color_type = stbi__get8(s);   //   color type
   if ( tga_color_type > 1 ) goto errorEnd;   //   only RGB or indexed allowed
   sz = stbi__get8(s);   //   image type
   if ( tga_color_type == 1 ) { // colormapped (paletted) image
      if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9
      stbi__skip(s,4);       // skip index of first colormap entry and number of entries
      sz = stbi__get8(s);    //   check bits per palette color entry
      if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
      stbi__skip(s,4);       // skip image x and y origin
   } else { // "normal" image w/o colormap
      if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE
      stbi__skip(s,9); // skip colormap specification and image x/y origin
   }
   if ( stbi__get16le(s) < 1 ) goto errorEnd;      //   test width
   if ( stbi__get16le(s) < 1 ) goto errorEnd;      //   test height
   sz = stbi__get8(s);   //   bits per pixel
   if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index
   if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;

   res = 1; // if we got this far, everything's good and we can return 1 instead of 0

errorEnd:
   stbi__rewind(s);
   return res;
}

// read 16bit value and convert to 24bit RGB
static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out)
{
   stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
   stbi__uint16 fiveBitMask = 31;
   // we have 3 channels with 5bits each
   int r = (px >> 10) & fiveBitMask;
   int g = (px >> 5) & fiveBitMask;
   int b = px & fiveBitMask;
   // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
   out[0] = (stbi_uc)((r * 255)/31);
   out[1] = (stbi_uc)((g * 255)/31);
   out[2] = (stbi_uc)((b * 255)/31);

   // some people claim that the most significant bit might be used for alpha
   // (possibly if an alpha-bit is set in the "image descriptor byte")
   // but that only made 16bit test images completely translucent..
   // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
}

// Full TGA decode: raw and RLE, paletted and truecolor, with optional
// vertical flip depending on the image-descriptor origin bit.
static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
{
   // read in the TGA header stuff
   int tga_offset = stbi__get8(s);
   int tga_indexed = stbi__get8(s);
   int tga_image_type = stbi__get8(s);
   int tga_is_RLE = 0;
   int tga_palette_start = stbi__get16le(s);
   int tga_palette_len = stbi__get16le(s);
   int tga_palette_bits = stbi__get8(s);
   int tga_x_origin = stbi__get16le(s);
   int tga_y_origin = stbi__get16le(s);
   int tga_width = stbi__get16le(s);
   int tga_height = stbi__get16le(s);
   int tga_bits_per_pixel = stbi__get8(s);
   int tga_comp, tga_rgb16=0;
   int tga_inverted = stbi__get8(s);
   // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
   //   image data
   unsigned char *tga_data;
   unsigned char *tga_palette = NULL;
   int i, j;
   unsigned char raw_data[4] = {0};
   int RLE_count = 0;
   int RLE_repeating = 0;
   int read_next_pixel = 1;
   STBI_NOTUSED(ri);
   STBI_NOTUSED(tga_x_origin); // @TODO
   STBI_NOTUSED(tga_y_origin); // @TODO

   if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
   if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");

   //   do a tiny bit of precessing
   if ( tga_image_type >= 8 )
   {
      tga_image_type -= 8;
      tga_is_RLE = 1;
   }
   tga_inverted = 1 - ((tga_inverted >> 5) & 1);

   //   If I'm paletted, then I'll use the number of bits from the palette
   if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
   else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);

   if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
      return stbi__errpuc("bad format", "Can't find out TGA pixelformat");

   //   tga info
   *x = tga_width;
   *y = tga_height;
   if (comp) *comp = tga_comp;

   if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
      return stbi__errpuc("too large", "Corrupt TGA");

   tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
   if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");

   // skip to the data's starting position (offset usually = 0)
   stbi__skip(s, tga_offset );

   if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) {
      // fast path: raw, non-paletted, 8-bit channels -> read whole rows
      for (i=0; i < tga_height; ++i) {
         int row = tga_inverted ? tga_height -i - 1 : i;
         stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
         stbi__getn(s, tga_row, tga_width * tga_comp);
      }
   } else  {
      //   do I need to load a palette?
      if ( tga_indexed)
      {
         if (tga_palette_len == 0) {  /* you have to have at least one entry! */
            STBI_FREE(tga_data);
            return stbi__errpuc("bad palette", "Corrupt TGA");
         }

         //   any data to skip? (offset usually = 0)
         stbi__skip(s, tga_palette_start );
         //   load the palette
         tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
         if (!tga_palette) {
            STBI_FREE(tga_data);
            return stbi__errpuc("outofmem", "Out of memory");
         }
         if (tga_rgb16) {
            stbi_uc *pal_entry = tga_palette;
            STBI_ASSERT(tga_comp == STBI_rgb);
            for (i=0; i < tga_palette_len; ++i) {
               stbi__tga_read_rgb16(s, pal_entry);
               pal_entry += tga_comp;
            }
         } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
               STBI_FREE(tga_data);
               STBI_FREE(tga_palette);
               return stbi__errpuc("bad palette", "Corrupt TGA");
         }
      }
      //   load the data
      for (i=0; i < tga_width * tga_height; ++i)
      {
         //   if I'm in RLE mode, do I need to get a RLE stbi__pngchunk?
         if ( tga_is_RLE )
         {
            if ( RLE_count == 0 )
            {
               //   yep, get the next byte as a RLE command
               int RLE_cmd = stbi__get8(s);
               RLE_count = 1 + (RLE_cmd & 127);
               RLE_repeating = RLE_cmd >> 7;
               read_next_pixel = 1;
            } else if ( !RLE_repeating )
            {
               read_next_pixel = 1;
            }
         } else
         {
            read_next_pixel = 1;
         }
         //   OK, if I need to read a pixel, do it now
         if ( read_next_pixel )
         {
            //   load however much data we did have
            if ( tga_indexed )
            {
               // read in index, then perform the lookup
               int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
               if ( pal_idx >= tga_palette_len ) {
                  // invalid index
                  pal_idx = 0;
               }
               pal_idx *= tga_comp;
               for (j = 0; j < tga_comp; ++j) {
                  raw_data[j] = tga_palette[pal_idx+j];
               }
            } else if(tga_rgb16) {
               STBI_ASSERT(tga_comp == STBI_rgb);
               stbi__tga_read_rgb16(s, raw_data);
            } else {
               //   read in the data raw
               for (j = 0; j < tga_comp; ++j) {
                  raw_data[j] = stbi__get8(s);
               }
            }
            //   clear the readi$ng flag for the next pixel
            read_next_pixel = 0;
         } // end of reading a pixel

         // copy data
         for (j = 0; j < tga_comp; ++j)
           tga_data[i*tga_comp+j] = raw_data[j];

         //   in case we're in RLE mode, keep counting down
         --RLE_count;
      }
      //   do I need to invert the image?
      if ( tga_inverted )
      {
         for (j = 0; j*2 < tga_height; ++j)
         {
            int index1 = j * tga_width * tga_comp;
            int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
            for (i = tga_width * tga_comp; i > 0; --i)
            {
               unsigned char temp = tga_data[index1];
               tga_data[index1] = tga_data[index2];
               tga_data[index2] = temp;
               ++index1;
               ++index2;
            }
         }
      }
      //   clear my palette, if I had one
      if ( tga_palette != NULL )
      {
         STBI_FREE( tga_palette );
      }
   }

   // swap RGB - if the source data was RGB16, it already is in the right order
   if (tga_comp >= 3 && !tga_rgb16)
   {
      unsigned char* tga_pixel = tga_data;
      for (i=0; i < tga_width * tga_height; ++i)
      {
         unsigned char temp = tga_pixel[0];
         tga_pixel[0] = tga_pixel[2];
         tga_pixel[2] = temp;
         tga_pixel += tga_comp;
      }
   }

   // convert to target component count
   if (req_comp && req_comp != tga_comp)
      tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);

   //   the things I do to get rid of an error message, and yet keep
   //   Microsoft's C compilers happy... [8^(
   tga_palette_start = tga_palette_len = tga_palette_bits =
         tga_x_origin = tga_y_origin = 0;
   STBI_NOTUSED(tga_palette_start);
   //   OK, done
   return tga_data;
}
#endif

// *************************************************************************************************
// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB

#ifndef STBI_NO_PSD
// Probe: file begins with the big-endian "8BPS" signature.
static int stbi__psd_test(stbi__context *s)
{
   int r = (stbi__get32be(s) == 0x38425053);
   stbi__rewind(s);
   return r;
}

// Decode one PackBits-style RLE channel into p, writing every 4th byte
// (the output is interleaved RGBA). Returns 0 on corrupt run lengths.
static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount)
{
   int count, nleft, len;

   count = 0;
   while ((nleft = pixelCount - count) > 0) {
      len = stbi__get8(s);
      if (len == 128) {
         // No-op.
      } else if (len < 128) {
         // Copy next len+1 bytes literally.
         len++;
         if (len > nleft) return 0; // corrupt data
         count += len;
         while (len) {
            *p = stbi__get8(s);
            p += 4;
            len--;
         }
      } else if (len > 128) {
         stbi_uc   val;
         // Next -len+1 bytes in the dest are replicated from next source byte.
         // (Interpret len as a negative 8-bit int.)
         len = 257 - len;
         if (len > nleft) return 0; // corrupt data
         val = stbi__get8(s);
         count += len;
         while (len) {
            *p = val;
            p += 4;
            len--;
         }
      }
   }

   return 1;
}

static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
{
   int pixelCount;
   int channelCount, compression;
   int channel, i;
   int bitdepth;
   int w,h;
   stbi_uc *out;
   STBI_NOTUSED(ri);

   // Check identifier
   if (stbi__get32be(s) != 0x38425053)   // "8BPS"
      return stbi__errpuc("not PSD", "Corrupt PSD image");

   // Check file type version.
   if (stbi__get16be(s) != 1)
      return stbi__errpuc("wrong version", "Unsupported version of PSD image");

   // Skip 6 reserved bytes.
   stbi__skip(s, 6 );

   // Read the number of channels (R, G, B, A, etc).
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. 
+ //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; y<height; ++y) { + int packet_idx; + + for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { + stbi__pic_packet *packet = &packets[packet_idx]; + stbi_uc *dest = result+y*width*4; + + switch (packet->type) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;x<width;++x, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; i<count; ++i,dest+=4) + stbi__copyval(packet->channel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;i<count;++i, dest += 4) + stbi__copyval(packet->channel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;i<count;++i, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + + if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s);
//skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + if (!result) return stbi__errpuc("outofmem", "Out of memory"); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + 
pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = 
&g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return 
stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. 
+ } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. + + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays) +{ + STBI_FREE(g->out); + STBI_FREE(g->history); + STBI_FREE(g->background); + + if (out) STBI_FREE(out); + if (delays && *delays) STBI_FREE(*delays); + return stbi__errpuc("outofmem", "Out of memory"); +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, 
int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + int out_size = 0; + int delays_size = 0; + + STBI_NOTUSED(out_size); + STBI_NOTUSED(delays_size); + + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride ); + if (!tmp) + return stbi__load_gif_main_outofmem(&g, out, delays); + else { + out = (stbi_uc*) tmp; + out_size = layers * stride; + } + + if (delays) { + int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers ); + if (!new_delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + *delays = new_delays; + delays_size = layers * sizeof(int); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (!out) + return stbi__load_gif_main_outofmem(&g, out, delays); + out_size = layers * stride; + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + if (!*delays) + return stbi__load_gif_main_outofmem(&g, out, delays); + delays_size = layers * sizeof(int); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int 
*x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; 
+ // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= 
stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} 
+#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) { + stbi__rewind( s ); + return 0; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if 
(!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x 
= s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } + + if (req_comp && req_comp != s->img_n) { + if (ri->bits_per_channel == 16) { + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); + } else { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + if((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = 
&dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + if(*x == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + if (*y == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; + else + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + 
return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused 
functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 
'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from 
Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/Blastproof/initfsgen/address.c b/Blastproof/initfsgen/address.c new file mode 100644 index 0000000..b136af8 --- /dev/null +++ b/Blastproof/initfsgen/address.c @@ -0,0 +1,104 @@ +#include +#include + +#include "address.h" +#include "params.h" +#include "utils.h" + +/* + * Specify which level of Merkle tree (the "layer") we're working on + */ +void set_layer_addr(uint32_t addr[8], uint32_t layer) +{ + ((unsigned char *)addr)[SPX_OFFSET_LAYER] = (unsigned char)layer; +} + +/* + * Specify which Merkle tree within the level (the "tree address") we're working on + */ +void set_tree_addr(uint32_t addr[8], uint64_t tree) +{ +#if (SPX_TREE_HEIGHT * (SPX_D - 1)) > 64 + #error Subtree addressing is currently limited to at most 2^64 trees +#endif + ull_to_bytes(&((unsigned char *)addr)[SPX_OFFSET_TREE], 8, tree ); +} + +/* + * Specify the reason we'll use this address structure for, that is, what + * hash will we compute with it. This is used so that unrelated types of + * hashes don't accidentally get the same address structure. The type will be + * one of the SPX_ADDR_TYPE constants + */ +void set_type(uint32_t addr[8], uint32_t type) +{ + ((unsigned char *)addr)[SPX_OFFSET_TYPE] = (unsigned char)type; +} + +/* + * Copy the layer and tree fields of the address structure. 
This is used + * when we're doing multiple types of hashes within the same Merkle tree + */ +void copy_subtree_addr(uint32_t out[8], const uint32_t in[8]) +{ + memcpy( out, in, SPX_OFFSET_TREE+8 ); +} + +/* These functions are used for OTS addresses. */ + +/* + * Specify which Merkle leaf we're working on; that is, which OTS keypair + * we're talking about. + */ +void set_keypair_addr(uint32_t addr[8], uint32_t keypair) +{ + u32_to_bytes(&((unsigned char *)addr)[SPX_OFFSET_KP_ADDR], keypair); +} + +/* + * Copy the layer, tree and keypair fields of the address structure. This is + * used when we're doing multiple things within the same OTS keypair + */ +void copy_keypair_addr(uint32_t out[8], const uint32_t in[8]) +{ + memcpy( out, in, SPX_OFFSET_TREE+8 ); + memcpy( (unsigned char *)out + SPX_OFFSET_KP_ADDR, (unsigned char *)in + SPX_OFFSET_KP_ADDR, 4); +} + +/* + * Specify which Merkle chain within the OTS we're working with + * (the chain address) + */ +void set_chain_addr(uint32_t addr[8], uint32_t chain) +{ + ((unsigned char *)addr)[SPX_OFFSET_CHAIN_ADDR] = (unsigned char)chain; +} + +/* + * Specify where in the Merkle chain we are +* (the hash address) + */ +void set_hash_addr(uint32_t addr[8], uint32_t hash) +{ + ((unsigned char *)addr)[SPX_OFFSET_HASH_ADDR] = (unsigned char)hash; +} + +/* These functions are used for all hash tree addresses (including FORS). 
*/ + +/* + * Specify the height of the node in the Merkle/FORS tree we are in + * (the tree height) + */ +void set_tree_height(uint32_t addr[8], uint32_t tree_height) +{ + ((unsigned char *)addr)[SPX_OFFSET_TREE_HGT] = (unsigned char)tree_height; +} + +/* + * Specify the distance from the left edge of the node in the Merkle/FORS tree + * (the tree index) + */ +void set_tree_index(uint32_t addr[8], uint32_t tree_index) +{ + u32_to_bytes(&((unsigned char *)addr)[SPX_OFFSET_TREE_INDEX], tree_index ); +} diff --git a/Blastproof/initfsgen/address.h b/Blastproof/initfsgen/address.h new file mode 100644 index 0000000..49f8d66 --- /dev/null +++ b/Blastproof/initfsgen/address.h @@ -0,0 +1,51 @@ +#ifndef SPX_ADDRESS_H +#define SPX_ADDRESS_H + +#include +#include "params.h" + +/* The hash types that are passed to set_type */ +#define SPX_ADDR_TYPE_WOTS 0 +#define SPX_ADDR_TYPE_WOTSPK 1 +#define SPX_ADDR_TYPE_HASHTREE 2 +#define SPX_ADDR_TYPE_FORSTREE 3 +#define SPX_ADDR_TYPE_FORSPK 4 +#define SPX_ADDR_TYPE_WOTSPRF 5 +#define SPX_ADDR_TYPE_FORSPRF 6 + +#define set_layer_addr SPX_NAMESPACE(set_layer_addr) +void set_layer_addr(uint32_t addr[8], uint32_t layer); + +#define set_tree_addr SPX_NAMESPACE(set_tree_addr) +void set_tree_addr(uint32_t addr[8], uint64_t tree); + +#define set_type SPX_NAMESPACE(set_type) +void set_type(uint32_t addr[8], uint32_t type); + +/* Copies the layer and tree part of one address into the other */ +#define copy_subtree_addr SPX_NAMESPACE(copy_subtree_addr) +void copy_subtree_addr(uint32_t out[8], const uint32_t in[8]); + +/* These functions are used for WOTS and FORS addresses. 
*/ + +#define set_keypair_addr SPX_NAMESPACE(set_keypair_addr) +void set_keypair_addr(uint32_t addr[8], uint32_t keypair); + +#define set_chain_addr SPX_NAMESPACE(set_chain_addr) +void set_chain_addr(uint32_t addr[8], uint32_t chain); + +#define set_hash_addr SPX_NAMESPACE(set_hash_addr) +void set_hash_addr(uint32_t addr[8], uint32_t hash); + +#define copy_keypair_addr SPX_NAMESPACE(copy_keypair_addr) +void copy_keypair_addr(uint32_t out[8], const uint32_t in[8]); + +/* These functions are used for all hash tree addresses (including FORS). */ + +#define set_tree_height SPX_NAMESPACE(set_tree_height) +void set_tree_height(uint32_t addr[8], uint32_t tree_height); + +#define set_tree_index SPX_NAMESPACE(set_tree_index) +void set_tree_index(uint32_t addr[8], uint32_t tree_index); + +#endif diff --git a/Blastproof/initfsgen/api.h b/Blastproof/initfsgen/api.h new file mode 100644 index 0000000..d57a148 --- /dev/null +++ b/Blastproof/initfsgen/api.h @@ -0,0 +1,77 @@ +#ifndef SPX_API_H +#define SPX_API_H + +#include +#include + +#include "params.h" + +#define CRYPTO_ALGNAME "SPHINCS+" + +#define CRYPTO_SECRETKEYBYTES SPX_SK_BYTES +#define CRYPTO_PUBLICKEYBYTES SPX_PK_BYTES +#define CRYPTO_BYTES SPX_BYTES +#define CRYPTO_SEEDBYTES 3*SPX_N + +/* + * Returns the length of a secret key, in bytes + */ +unsigned long long crypto_sign_secretkeybytes(void); + +/* + * Returns the length of a public key, in bytes + */ +unsigned long long crypto_sign_publickeybytes(void); + +/* + * Returns the length of a signature, in bytes + */ +unsigned long long crypto_sign_bytes(void); + +/* + * Returns the length of the seed required to generate a key pair, in bytes + */ +unsigned long long crypto_sign_seedbytes(void); + +/* + * Generates a SPHINCS+ key pair given a seed. 
+ * Format sk: [SK_SEED || SK_PRF || PUB_SEED || root] + * Format pk: [root || PUB_SEED] + */ +int crypto_sign_seed_keypair(unsigned char *pk, unsigned char *sk, + const unsigned char *seed); + +/* + * Generates a SPHINCS+ key pair. + * Format sk: [SK_SEED || SK_PRF || PUB_SEED || root] + * Format pk: [root || PUB_SEED] + */ +int crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +/** + * Returns an array containing a detached signature. + */ +int crypto_sign_signature(uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk); + +/** + * Verifies a detached signature and message under a given public key. + */ +int crypto_sign_verify(const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk); + +/** + * Returns an array containing the signature followed by the message. + */ +int crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); + +/** + * Verifies a given signature-message pair under a given public key. 
+ */ +int crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif diff --git a/Blastproof/initfsgen/build.sh b/Blastproof/initfsgen/build.sh new file mode 100755 index 0000000..0e45a6e --- /dev/null +++ b/Blastproof/initfsgen/build.sh @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: MPL-2.0 +gcc -c address.c fors.c hash_sha2.c merkle.c randombytes.c sign.c utils.c utilsx1.c wots.c wotsx1.c thash_sha2_robust.c sha2.c sha3.c -Ofast -march=native +g++ initfsgen.cpp address.o fors.o hash_sha2.o merkle.o randombytes.o sign.o utils.o utilsx1.o wots.o wotsx1.o thash_sha2_robust.o sha2.o sha3.o -o initfsgen -lcrypto -Ofast -march=native +rm *.o diff --git a/Blastproof/initfsgen/context.h b/Blastproof/initfsgen/context.h new file mode 100644 index 0000000..aded564 --- /dev/null +++ b/Blastproof/initfsgen/context.h @@ -0,0 +1,28 @@ +#ifndef SPX_CONTEXT_H +#define SPX_CONTEXT_H + +#include + +#include "params.h" + +typedef struct { + uint8_t pub_seed[SPX_N]; + uint8_t sk_seed[SPX_N]; + +#ifdef SPX_SHA2 + // sha256 state that absorbed pub_seed + uint8_t state_seeded[40]; + +# if SPX_SHA512 + // sha512 state that absorbed pub_seed + uint8_t state_seeded_512[72]; +# endif +#endif + +#ifdef SPX_HARAKA + uint64_t tweaked512_rc64[10][8]; + uint32_t tweaked256_rc32[10][8]; +#endif +} spx_ctx; + +#endif diff --git a/Blastproof/initfsgen/fors.c b/Blastproof/initfsgen/fors.c new file mode 100644 index 0000000..e6aa4b4 --- /dev/null +++ b/Blastproof/initfsgen/fors.c @@ -0,0 +1,161 @@ +#include +#include +#include + +#include "fors.h" +#include "utils.h" +#include "utilsx1.h" +#include "hash.h" +#include "thash.h" +#include "address.h" + +static void fors_gen_sk(unsigned char *sk, const spx_ctx *ctx, + uint32_t fors_leaf_addr[8]) +{ + prf_addr(sk, ctx, fors_leaf_addr); +} + +static void fors_sk_to_leaf(unsigned char *leaf, const unsigned char *sk, + const spx_ctx *ctx, + uint32_t fors_leaf_addr[8]) 
+{ + thash(leaf, sk, 1, ctx, fors_leaf_addr); +} + +struct fors_gen_leaf_info { + uint32_t leaf_addrx[8]; +}; + +static void fors_gen_leafx1(unsigned char *leaf, + const spx_ctx *ctx, + uint32_t addr_idx, void *info) +{ + struct fors_gen_leaf_info *fors_info = info; + uint32_t *fors_leaf_addr = fors_info->leaf_addrx; + + /* Only set the parts that the caller doesn't set */ + set_tree_index(fors_leaf_addr, addr_idx); + set_type(fors_leaf_addr, SPX_ADDR_TYPE_FORSPRF); + fors_gen_sk(leaf, ctx, fors_leaf_addr); + + set_type(fors_leaf_addr, SPX_ADDR_TYPE_FORSTREE); + fors_sk_to_leaf(leaf, leaf, + ctx, fors_leaf_addr); +} + +/** + * Interprets m as SPX_FORS_HEIGHT-bit unsigned integers. + * Assumes m contains at least SPX_FORS_HEIGHT * SPX_FORS_TREES bits. + * Assumes indices has space for SPX_FORS_TREES integers. + */ +static void message_to_indices(uint32_t *indices, const unsigned char *m) +{ + unsigned int i, j; + unsigned int offset = 0; + + for (i = 0; i < SPX_FORS_TREES; i++) { + indices[i] = 0; + for (j = 0; j < SPX_FORS_HEIGHT; j++) { + indices[i] ^= ((m[offset >> 3] >> (offset & 0x7)) & 1u) << j; + offset++; + } + } +} + +/** + * Signs a message m, deriving the secret key from sk_seed and the FTS address. + * Assumes m contains at least SPX_FORS_HEIGHT * SPX_FORS_TREES bits. 
+ */ +void fors_sign(unsigned char *sig, unsigned char *pk, + const unsigned char *m, + const spx_ctx *ctx, + const uint32_t fors_addr[8]) +{ + uint32_t indices[SPX_FORS_TREES]; + unsigned char roots[SPX_FORS_TREES * SPX_N]; + uint32_t fors_tree_addr[8] = {0}; + struct fors_gen_leaf_info fors_info = {0}; + uint32_t *fors_leaf_addr = fors_info.leaf_addrx; + uint32_t fors_pk_addr[8] = {0}; + uint32_t idx_offset; + unsigned int i; + + copy_keypair_addr(fors_tree_addr, fors_addr); + copy_keypair_addr(fors_leaf_addr, fors_addr); + + copy_keypair_addr(fors_pk_addr, fors_addr); + set_type(fors_pk_addr, SPX_ADDR_TYPE_FORSPK); + + message_to_indices(indices, m); + + for (i = 0; i < SPX_FORS_TREES; i++) { + idx_offset = i * (1 << SPX_FORS_HEIGHT); + + set_tree_height(fors_tree_addr, 0); + set_tree_index(fors_tree_addr, indices[i] + idx_offset); + set_type(fors_tree_addr, SPX_ADDR_TYPE_FORSPRF); + + /* Include the secret key part that produces the selected leaf node. */ + fors_gen_sk(sig, ctx, fors_tree_addr); + set_type(fors_tree_addr, SPX_ADDR_TYPE_FORSTREE); + sig += SPX_N; + + /* Compute the authentication path for this leaf node. */ + treehashx1(roots + i*SPX_N, sig, ctx, + indices[i], idx_offset, SPX_FORS_HEIGHT, fors_gen_leafx1, + fors_tree_addr, &fors_info); + + sig += SPX_N * SPX_FORS_HEIGHT; + } + + /* Hash horizontally across all tree roots to derive the public key. */ + thash(pk, roots, SPX_FORS_TREES, ctx, fors_pk_addr); +} + +/** + * Derives the FORS public key from a signature. + * This can be used for verification by comparing to a known public key, or to + * subsequently verify a signature on the derived public key. The latter is the + * typical use-case when used as an FTS below an OTS in a hypertree. + * Assumes m contains at least SPX_FORS_HEIGHT * SPX_FORS_TREES bits. 
+ */ +void fors_pk_from_sig(unsigned char *pk, + const unsigned char *sig, const unsigned char *m, + const spx_ctx* ctx, + const uint32_t fors_addr[8]) +{ + uint32_t indices[SPX_FORS_TREES]; + unsigned char roots[SPX_FORS_TREES * SPX_N]; + unsigned char leaf[SPX_N]; + uint32_t fors_tree_addr[8] = {0}; + uint32_t fors_pk_addr[8] = {0}; + uint32_t idx_offset; + unsigned int i; + + copy_keypair_addr(fors_tree_addr, fors_addr); + copy_keypair_addr(fors_pk_addr, fors_addr); + + set_type(fors_tree_addr, SPX_ADDR_TYPE_FORSTREE); + set_type(fors_pk_addr, SPX_ADDR_TYPE_FORSPK); + + message_to_indices(indices, m); + + for (i = 0; i < SPX_FORS_TREES; i++) { + idx_offset = i * (1 << SPX_FORS_HEIGHT); + + set_tree_height(fors_tree_addr, 0); + set_tree_index(fors_tree_addr, indices[i] + idx_offset); + + /* Derive the leaf from the included secret key part. */ + fors_sk_to_leaf(leaf, sig, ctx, fors_tree_addr); + sig += SPX_N; + + /* Derive the corresponding root node of this tree. */ + compute_root(roots + i*SPX_N, leaf, indices[i], idx_offset, + sig, SPX_FORS_HEIGHT, ctx, fors_tree_addr); + sig += SPX_N * SPX_FORS_HEIGHT; + } + + /* Hash horizontally across all tree roots to derive the public key. */ + thash(pk, roots, SPX_FORS_TREES, ctx, fors_pk_addr); +} diff --git a/Blastproof/initfsgen/fors.h b/Blastproof/initfsgen/fors.h new file mode 100644 index 0000000..8d98017 --- /dev/null +++ b/Blastproof/initfsgen/fors.h @@ -0,0 +1,32 @@ +#ifndef SPX_FORS_H +#define SPX_FORS_H + +#include + +#include "params.h" +#include "context.h" + +/** + * Signs a message m, deriving the secret key from sk_seed and the FTS address. + * Assumes m contains at least SPX_FORS_HEIGHT * SPX_FORS_TREES bits. + */ +#define fors_sign SPX_NAMESPACE(fors_sign) +void fors_sign(unsigned char *sig, unsigned char *pk, + const unsigned char *m, + const spx_ctx* ctx, + const uint32_t fors_addr[8]); + +/** + * Derives the FORS public key from a signature. 
+ * This can be used for verification by comparing to a known public key, or to + * subsequently verify a signature on the derived public key. The latter is the + * typical use-case when used as an FTS below an OTS in a hypertree. + * Assumes m contains at least SPX_FORS_HEIGHT * SPX_FORS_TREES bits. + */ +#define fors_pk_from_sig SPX_NAMESPACE(fors_pk_from_sig) +void fors_pk_from_sig(unsigned char *pk, + const unsigned char *sig, const unsigned char *m, + const spx_ctx* ctx, + const uint32_t fors_addr[8]); + +#endif diff --git a/Blastproof/initfsgen/hash.h b/Blastproof/initfsgen/hash.h new file mode 100644 index 0000000..b141f09 --- /dev/null +++ b/Blastproof/initfsgen/hash.h @@ -0,0 +1,27 @@ +#ifndef SPX_HASH_H +#define SPX_HASH_H + +#include +#include "context.h" +#include "params.h" + +#define initialize_hash_function SPX_NAMESPACE(initialize_hash_function) +void initialize_hash_function(spx_ctx *ctx); + +#define prf_addr SPX_NAMESPACE(prf_addr) +void prf_addr(unsigned char *out, const spx_ctx *ctx, + const uint32_t addr[8]); + +#define gen_message_random SPX_NAMESPACE(gen_message_random) +void gen_message_random(unsigned char *R, const unsigned char *sk_prf, + const unsigned char *optrand, + const unsigned char *m, unsigned long long mlen, + const spx_ctx *ctx); + +#define hash_message SPX_NAMESPACE(hash_message) +void hash_message(unsigned char *digest, uint64_t *tree, uint32_t *leaf_idx, + const unsigned char *R, const unsigned char *pk, + const unsigned char *m, unsigned long long mlen, + const spx_ctx *ctx); + +#endif diff --git a/Blastproof/initfsgen/hash_sha2.c b/Blastproof/initfsgen/hash_sha2.c new file mode 100644 index 0000000..67098e6 --- /dev/null +++ b/Blastproof/initfsgen/hash_sha2.c @@ -0,0 +1,197 @@ +#include +#include + +#include "address.h" +#include "utils.h" +#include "params.h" +#include "hash.h" +#include "sha2.h" + +#if SPX_N >= 24 +#define SPX_SHAX_OUTPUT_BYTES SPX_SHA512_OUTPUT_BYTES +#define SPX_SHAX_BLOCK_BYTES SPX_SHA512_BLOCK_BYTES 
+#define shaX_inc_init sha512_inc_init +#define shaX_inc_blocks sha512_inc_blocks +#define shaX_inc_finalize sha512_inc_finalize +#define shaX sha512 +#define mgf1_X mgf1_512 +#else +#define SPX_SHAX_OUTPUT_BYTES SPX_SHA256_OUTPUT_BYTES +#define SPX_SHAX_BLOCK_BYTES SPX_SHA256_BLOCK_BYTES +#define shaX_inc_init sha256_inc_init +#define shaX_inc_blocks sha256_inc_blocks +#define shaX_inc_finalize sha256_inc_finalize +#define shaX sha256 +#define mgf1_X mgf1_256 +#endif + + +/* For SHA, there is no immediate reason to initialize at the start, + so this function is an empty operation. */ +void initialize_hash_function(spx_ctx *ctx) +{ + seed_state(ctx); +} + +/* + * Computes PRF(pk_seed, sk_seed, addr). + */ +void prf_addr(unsigned char *out, const spx_ctx *ctx, + const uint32_t addr[8]) +{ + uint8_t sha2_state[40]; + unsigned char buf[SPX_SHA256_ADDR_BYTES + SPX_N]; + unsigned char outbuf[SPX_SHA256_OUTPUT_BYTES]; + + /* Retrieve precomputed state containing pub_seed */ + memcpy(sha2_state, ctx->state_seeded, 40 * sizeof(uint8_t)); + + /* Remainder: ADDR^c ‖ SK.seed */ + memcpy(buf, addr, SPX_SHA256_ADDR_BYTES); + memcpy(buf + SPX_SHA256_ADDR_BYTES, ctx->sk_seed, SPX_N); + + sha256_inc_finalize(outbuf, sha2_state, buf, SPX_SHA256_ADDR_BYTES + SPX_N); + + memcpy(out, outbuf, SPX_N); +} + +/** + * Computes the message-dependent randomness R, using a secret seed as a key + * for HMAC, and an optional randomization value prefixed to the message. + * This requires m to have at least SPX_SHAX_BLOCK_BYTES + SPX_N space + * available in front of the pointer, i.e. before the message to use for the + * prefix. This is necessary to prevent having to move the message around (and + * allocate memory for it). 
+ */ +void gen_message_random(unsigned char *R, const unsigned char *sk_prf, + const unsigned char *optrand, + const unsigned char *m, unsigned long long mlen, + const spx_ctx *ctx) +{ + (void)ctx; + + unsigned char buf[SPX_SHAX_BLOCK_BYTES + SPX_SHAX_OUTPUT_BYTES]; + uint8_t state[8 + SPX_SHAX_OUTPUT_BYTES]; + int i; + +#if SPX_N > SPX_SHAX_BLOCK_BYTES + #error "Currently only supports SPX_N of at most SPX_SHAX_BLOCK_BYTES" +#endif + + /* This implements HMAC-SHA */ + for (i = 0; i < SPX_N; i++) { + buf[i] = 0x36 ^ sk_prf[i]; + } + memset(buf + SPX_N, 0x36, SPX_SHAX_BLOCK_BYTES - SPX_N); + + shaX_inc_init(state); + shaX_inc_blocks(state, buf, 1); + + memcpy(buf, optrand, SPX_N); + + /* If optrand + message cannot fill up an entire block */ + if (SPX_N + mlen < SPX_SHAX_BLOCK_BYTES) { + memcpy(buf + SPX_N, m, mlen); + shaX_inc_finalize(buf + SPX_SHAX_BLOCK_BYTES, state, + buf, mlen + SPX_N); + } + /* Otherwise first fill a block, so that finalize only uses the message */ + else { + memcpy(buf + SPX_N, m, SPX_SHAX_BLOCK_BYTES - SPX_N); + shaX_inc_blocks(state, buf, 1); + + m += SPX_SHAX_BLOCK_BYTES - SPX_N; + mlen -= SPX_SHAX_BLOCK_BYTES - SPX_N; + shaX_inc_finalize(buf + SPX_SHAX_BLOCK_BYTES, state, m, mlen); + } + + for (i = 0; i < SPX_N; i++) { + buf[i] = 0x5c ^ sk_prf[i]; + } + memset(buf + SPX_N, 0x5c, SPX_SHAX_BLOCK_BYTES - SPX_N); + + shaX(buf, buf, SPX_SHAX_BLOCK_BYTES + SPX_SHAX_OUTPUT_BYTES); + memcpy(R, buf, SPX_N); +} + +/** + * Computes the message hash using R, the public key, and the message. + * Outputs the message digest and the index of the leaf. The index is split in + * the tree index and the leaf index, for convenient copying to an address. 
+ */ +void hash_message(unsigned char *digest, uint64_t *tree, uint32_t *leaf_idx, + const unsigned char *R, const unsigned char *pk, + const unsigned char *m, unsigned long long mlen, + const spx_ctx *ctx) +{ + (void)ctx; +#define SPX_TREE_BITS (SPX_TREE_HEIGHT * (SPX_D - 1)) +#define SPX_TREE_BYTES ((SPX_TREE_BITS + 7) / 8) +#define SPX_LEAF_BITS SPX_TREE_HEIGHT +#define SPX_LEAF_BYTES ((SPX_LEAF_BITS + 7) / 8) +#define SPX_DGST_BYTES (SPX_FORS_MSG_BYTES + SPX_TREE_BYTES + SPX_LEAF_BYTES) + + unsigned char seed[2*SPX_N + SPX_SHAX_OUTPUT_BYTES]; + + /* Round to nearest multiple of SPX_SHAX_BLOCK_BYTES */ +#if (SPX_SHAX_BLOCK_BYTES & (SPX_SHAX_BLOCK_BYTES-1)) != 0 + #error "Assumes that SPX_SHAX_BLOCK_BYTES is a power of 2" +#endif +#define SPX_INBLOCKS (((SPX_N + SPX_PK_BYTES + SPX_SHAX_BLOCK_BYTES - 1) & \ + -SPX_SHAX_BLOCK_BYTES) / SPX_SHAX_BLOCK_BYTES) + unsigned char inbuf[SPX_INBLOCKS * SPX_SHAX_BLOCK_BYTES]; + + unsigned char buf[SPX_DGST_BYTES]; + unsigned char *bufp = buf; + uint8_t state[8 + SPX_SHAX_OUTPUT_BYTES]; + + shaX_inc_init(state); + + // seed: SHA-X(R ‖ PK.seed ‖ PK.root ‖ M) + memcpy(inbuf, R, SPX_N); + memcpy(inbuf + SPX_N, pk, SPX_PK_BYTES); + + /* If R + pk + message cannot fill up an entire block */ + if (SPX_N + SPX_PK_BYTES + mlen < SPX_INBLOCKS * SPX_SHAX_BLOCK_BYTES) { + memcpy(inbuf + SPX_N + SPX_PK_BYTES, m, mlen); + shaX_inc_finalize(seed + 2*SPX_N, state, inbuf, SPX_N + SPX_PK_BYTES + mlen); + } + /* Otherwise first fill a block, so that finalize only uses the message */ + else { + memcpy(inbuf + SPX_N + SPX_PK_BYTES, m, + SPX_INBLOCKS * SPX_SHAX_BLOCK_BYTES - SPX_N - SPX_PK_BYTES); + shaX_inc_blocks(state, inbuf, SPX_INBLOCKS); + + m += SPX_INBLOCKS * SPX_SHAX_BLOCK_BYTES - SPX_N - SPX_PK_BYTES; + mlen -= SPX_INBLOCKS * SPX_SHAX_BLOCK_BYTES - SPX_N - SPX_PK_BYTES; + shaX_inc_finalize(seed + 2*SPX_N, state, m, mlen); + } + + // H_msg: MGF1-SHA-X(R ‖ PK.seed ‖ seed) + memcpy(seed, R, SPX_N); + memcpy(seed + SPX_N, pk, SPX_N); + + /* 
By doing this in two steps, we prevent hashing the message twice; + otherwise each iteration in MGF1 would hash the message again. */ + mgf1_X(bufp, SPX_DGST_BYTES, seed, 2*SPX_N + SPX_SHAX_OUTPUT_BYTES); + + memcpy(digest, bufp, SPX_FORS_MSG_BYTES); + bufp += SPX_FORS_MSG_BYTES; + +#if SPX_TREE_BITS > 64 + #error For given height and depth, 64 bits cannot represent all subtrees +#endif + + if (SPX_D == 1) { + *tree = 0; + } else { + *tree = bytes_to_ull(bufp, SPX_TREE_BYTES); + *tree &= (~(uint64_t)0) >> (64 - SPX_TREE_BITS); + } + bufp += SPX_TREE_BYTES; + + *leaf_idx = (uint32_t)bytes_to_ull(bufp, SPX_LEAF_BYTES); + *leaf_idx &= (~(uint32_t)0) >> (32 - SPX_LEAF_BITS); +} + + diff --git a/Blastproof/initfsgen/initfsgen.cpp b/Blastproof/initfsgen/initfsgen.cpp new file mode 100644 index 0000000..8473592 --- /dev/null +++ b/Blastproof/initfsgen/initfsgen.cpp @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: MPL-2.0 +extern "C" { +#include "api.h" +#include "sha3.h" +} +#undef str +#include +#include +#include +#include +#include +#include +#include +#include +using namespace std; +namespace fs=filesystem; +#pragma pack(push,1) +struct initfs_header { + uint8_t sign[8]={'I','n','i','t','F','i','S','y'}; + uint16_t bootloader_version=0x0001; + uint16_t initfs_version=0x0001; + uint32_t os_version=0x00000001; + uint8_t installation_id[48]={0}; + uint64_t initfs_size=0; + uint64_t table_size=0; + uint64_t files_area_size=0; + uint64_t entries_width=256; + uint64_t entries_count=0; + uint64_t files_area_offset=0; + uint64_t entropy_check1=0; + uint64_t check1=0; + uint8_t entry_table_hash[64]={0}; + uint8_t files_area_hash[64]={0}; + uint8_t installation_id_hash_hash[64]={0}; + uint8_t padding[128]={0}; + uint8_t header_hash[64]={0}; +}; +#pragma pack(pop) +#pragma pack(push,1) +struct file_entry { + uint64_t file_offset=0; + uint64_t file_size=0; + uint8_t pk[64]={0}; + uint8_t hash[64]={0}; + char file_name[112]={0}; +}; +#pragma pack(pop) +#pragma pack(push,1) +struct 
signsyst_header { + uint8_t sign[8]={'S','i','g','n','S','y','s','t'}; + uint16_t bootloader_version=0x0001; + uint16_t initfs_version=0x0001; + uint32_t os_version=0x00000001; + uint8_t installation_id[48]={0}; + uint64_t signature_size=0; + uint64_t signature_count=0; + uint64_t signsyst_size=0; + uint64_t signature_block_size=0; + uint8_t signature_block_hash[64]; + uint8_t padding[288]={0}; + uint8_t header_hash[64]={0}; +}; +#pragma pack(pop) +void secure_erase(void *address,size_t size) { + explicit_bzero(address,size); +} +vector generate_padding(size_t x,unsigned char p) { + vector result(x,0); + size_t f0=0; + size_t f1=1; + while (f0 initfs_files_name; + vector initfs_files_size; + size_t total_size=0; + for (auto &entry:fs::directory_iterator(folder_path)) { + if (!entry.is_regular_file()) { + cout<<"[InitFSGen] Error: InitFS only support files. Following entry isn't a regular file : "< entropy_id; + entropy_id.resize(48); + ifstream random("/dev/urandom",ios::binary); + if (!random) { + cout<<"[InitFSGen] Error: Can't open secure entropy source."<(entropy_id.data()),entropy_id.size()); + if (random.gcount()!=entropy_id.size()) { + cout<<"[InitFSGen] Error: Can't read enougth entropy for installation id."< entropy; + entropy.resize(8); + random.read(reinterpret_cast(entropy.data()),entropy.size()); + if (random.gcount()!=entropy.size()) { + cout<<"[InitFSGen] Error: Can't read enougth entropy."<(header.installation_id_hash_hash),sizeof(header.installation_id_hash_hash)); + sha3(header.installation_id_hash_hash,sizeof(header.installation_id_hash_hash),header.installation_id_hash_hash,sizeof(header.installation_id_hash_hash)); + vector files_area; + vector files_entries_table; + vector sig_list; + sig_list.resize(CRYPTO_BYTES*header.entries_count); + size_t cursor=0; + files_area.resize(header.files_area_size); + uint8_t sk[CRYPTO_SECRETKEYBYTES]={0}; + for (size_t i=0;i(files_area.data()+cursor),initfs_files_size[i]); + if 
(file.gcount()!=initfs_files_size[i]) { + cout<<"[InitFSGen] Error: Couldn't read full file: "<(112)),entry.file_name); + entry.file_size=initfs_files_size[i]; + entry.file_offset=cursor; + sha3(files_area.data()+cursor,initfs_files_size[i],entry.hash,64); + if (crypto_sign_keypair(entry.pk,sk)) { + cout<<"[InitFSGen] Error: can't generate keypair for file: "< entries_table; + entries_table.resize(header.table_size); + for (size_t i=0;i(&header),sizeof(header)); + initfs_bin.write(reinterpret_cast(entries_table.data()),entries_table.size()); + initfs_bin.write(reinterpret_cast(files_area.data()),files_area.size()); + initfs_bin.close(); + ofstream signsyst_hash("signsyst-hash.bin",ios::binary); + if (!signsyst_hash) { + cout<<"[InitFSGen] Error: Can't open signsyst-hash.bin."< signsysthash(64,0); + sha3(&sign_header,sizeof(sign_header),signsysthash.data(),signsysthash.size()); + signsyst_hash.write(reinterpret_cast(signsysthash.data()),signsysthash.size()); + signsyst_hash.close(); + ofstream signsyst_bin("signsyst.bin",ios::binary); + if (!signsyst_bin) { + cout<<"[InitFSGen] Error: Can't open signsyst.bin."<(&sign_header),sizeof(sign_header)); + signsyst_bin.write(reinterpret_cast(sig_list.data()),sig_list.size()); + signsyst_bin.close(); + return 0; +} diff --git a/Blastproof/initfsgen/merkle.c b/Blastproof/initfsgen/merkle.c new file mode 100644 index 0000000..414f468 --- /dev/null +++ b/Blastproof/initfsgen/merkle.c @@ -0,0 +1,61 @@ +#include +#include + +#include "utils.h" +#include "utilsx1.h" +#include "wots.h" +#include "wotsx1.h" +#include "merkle.h" +#include "address.h" +#include "params.h" + +/* + * This generates a Merkle signature (WOTS signature followed by the Merkle + * authentication path). 
This is in this file because most of the complexity + * is involved with the WOTS signature; the Merkle authentication path logic + * is mostly hidden in treehashx4 + */ +void merkle_sign(uint8_t *sig, unsigned char *root, + const spx_ctx *ctx, + uint32_t wots_addr[8], uint32_t tree_addr[8], + uint32_t idx_leaf) +{ + unsigned char *auth_path = sig + SPX_WOTS_BYTES; + struct leaf_info_x1 info = { 0 }; + unsigned steps[ SPX_WOTS_LEN ]; + + info.wots_sig = sig; + chain_lengths(steps, root); + info.wots_steps = steps; + + set_type(&tree_addr[0], SPX_ADDR_TYPE_HASHTREE); + set_type(&info.pk_addr[0], SPX_ADDR_TYPE_WOTSPK); + copy_subtree_addr(&info.leaf_addr[0], wots_addr); + copy_subtree_addr(&info.pk_addr[0], wots_addr); + + info.wots_sign_leaf = idx_leaf; + + treehashx1(root, auth_path, ctx, + idx_leaf, 0, + SPX_TREE_HEIGHT, + wots_gen_leafx1, + tree_addr, &info); +} + +/* Compute root node of the top-most subtree. */ +void merkle_gen_root(unsigned char *root, const spx_ctx *ctx) +{ + /* We do not need the auth path in key generation, but it simplifies the + code to have just one treehash routine that computes both root and path + in one function. 
*/ + unsigned char auth_path[SPX_TREE_HEIGHT * SPX_N + SPX_WOTS_BYTES]; + uint32_t top_tree_addr[8] = {0}; + uint32_t wots_addr[8] = {0}; + + set_layer_addr(top_tree_addr, SPX_D - 1); + set_layer_addr(wots_addr, SPX_D - 1); + + merkle_sign(auth_path, root, ctx, + wots_addr, top_tree_addr, + (uint32_t)~0 /* ~0 means "don't bother generating an auth path */ ); +} diff --git a/Blastproof/initfsgen/merkle.h b/Blastproof/initfsgen/merkle.h new file mode 100644 index 0000000..9ac2759 --- /dev/null +++ b/Blastproof/initfsgen/merkle.h @@ -0,0 +1,18 @@ +#if !defined( MERKLE_H_ ) +#define MERKLE_H_ + +#include + +/* Generate a Merkle signature (WOTS signature followed by the Merkle */ +/* authentication path) */ +#define merkle_sign SPX_NAMESPACE(merkle_sign) +void merkle_sign(uint8_t *sig, unsigned char *root, + const spx_ctx* ctx, + uint32_t wots_addr[8], uint32_t tree_addr[8], + uint32_t idx_leaf); + +/* Compute the root node of the top-most subtree. */ +#define merkle_gen_root SPX_NAMESPACE(merkle_gen_root) +void merkle_gen_root(unsigned char *root, const spx_ctx* ctx); + +#endif /* MERKLE_H_ */ diff --git a/Blastproof/initfsgen/params.h b/Blastproof/initfsgen/params.h new file mode 100644 index 0000000..6dc6974 --- /dev/null +++ b/Blastproof/initfsgen/params.h @@ -0,0 +1,3 @@ +#define str(s) #s +#define xstr(s) str(s) +#include "params/params-sphincs-sha2-256f.h" diff --git a/Blastproof/initfsgen/params/params-sphincs-sha2-256f.h b/Blastproof/initfsgen/params/params-sphincs-sha2-256f.h new file mode 100644 index 0000000..53c5bef --- /dev/null +++ b/Blastproof/initfsgen/params/params-sphincs-sha2-256f.h @@ -0,0 +1,85 @@ +#ifndef SPX_PARAMS_H +#define SPX_PARAMS_H + +#define SPX_NAMESPACE(s) SPX_##s + +/* Hash output length in bytes. */ +#define SPX_N 32 +/* Height of the hypertree. */ +#define SPX_FULL_HEIGHT 68 +/* Number of subtree layer. */ +#define SPX_D 17 +/* FORS tree dimensions. 
*/ +#define SPX_FORS_HEIGHT 9 +#define SPX_FORS_TREES 35 +/* Winternitz parameter, */ +#define SPX_WOTS_W 16 + +/* The hash function is defined by linking a different hash.c file, as opposed + to setting a #define constant. */ + +/* This is a SHA2-based parameter set, hence whether we use SHA-256 + * exclusively or we use both SHA-256 and SHA-512 is controlled by + * the following #define */ +#define SPX_SHA512 1 /* Use SHA-512 for H and T_l, l >= 2 */ + +/* For clarity */ +#define SPX_ADDR_BYTES 32 + +/* WOTS parameters. */ +#if SPX_WOTS_W == 256 + #define SPX_WOTS_LOGW 8 +#elif SPX_WOTS_W == 16 + #define SPX_WOTS_LOGW 4 +#else + #error SPX_WOTS_W assumed 16 or 256 +#endif + +#define SPX_WOTS_LEN1 (8 * SPX_N / SPX_WOTS_LOGW) + +/* SPX_WOTS_LEN2 is floor(log(len_1 * (w - 1)) / log(w)) + 1; we precompute */ +#if SPX_WOTS_W == 256 + #if SPX_N <= 1 + #define SPX_WOTS_LEN2 1 + #elif SPX_N <= 256 + #define SPX_WOTS_LEN2 2 + #else + #error Did not precompute SPX_WOTS_LEN2 for n outside {2, .., 256} + #endif +#elif SPX_WOTS_W == 16 + #if SPX_N <= 8 + #define SPX_WOTS_LEN2 2 + #elif SPX_N <= 136 + #define SPX_WOTS_LEN2 3 + #elif SPX_N <= 256 + #define SPX_WOTS_LEN2 4 + #else + #error Did not precompute SPX_WOTS_LEN2 for n outside {2, .., 256} + #endif +#endif + +#define SPX_WOTS_LEN (SPX_WOTS_LEN1 + SPX_WOTS_LEN2) +#define SPX_WOTS_BYTES (SPX_WOTS_LEN * SPX_N) +#define SPX_WOTS_PK_BYTES SPX_WOTS_BYTES + +/* Subtree size. */ +#define SPX_TREE_HEIGHT (SPX_FULL_HEIGHT / SPX_D) + +#if SPX_TREE_HEIGHT * SPX_D != SPX_FULL_HEIGHT + #error SPX_D should always divide SPX_FULL_HEIGHT +#endif + +/* FORS parameters. */ +#define SPX_FORS_MSG_BYTES ((SPX_FORS_HEIGHT * SPX_FORS_TREES + 7) / 8) +#define SPX_FORS_BYTES ((SPX_FORS_HEIGHT + 1) * SPX_FORS_TREES * SPX_N) +#define SPX_FORS_PK_BYTES SPX_N + +/* Resulting SPX sizes. 
*/ +#define SPX_BYTES (SPX_N + SPX_FORS_BYTES + SPX_D * SPX_WOTS_BYTES +\ + SPX_FULL_HEIGHT * SPX_N) +#define SPX_PK_BYTES (2 * SPX_N) +#define SPX_SK_BYTES (2 * SPX_N + SPX_PK_BYTES) + +#include "../sha2_offsets.h" + +#endif diff --git a/Blastproof/initfsgen/randombytes.c b/Blastproof/initfsgen/randombytes.c new file mode 100644 index 0000000..cfbca17 --- /dev/null +++ b/Blastproof/initfsgen/randombytes.c @@ -0,0 +1,43 @@ +/* +This code was taken from the SPHINCS reference implementation and is public domain. +*/ + +#include +#include + +#include "randombytes.h" + +static int fd = -1; + +void randombytes(unsigned char *x, unsigned long long xlen) +{ + unsigned long long i; + + if (fd == -1) { + for (;;) { + fd = open("/dev/urandom", O_RDONLY); + if (fd != -1) { + break; + } + sleep(1); + } + } + + while (xlen > 0) { + if (xlen < 1048576) { + i = xlen; + } + else { + i = 1048576; + } + + i = (unsigned long long)read(fd, x, i); + if (i < 1) { + sleep(1); + continue; + } + + x += i; + xlen -= i; + } +} diff --git a/Blastproof/initfsgen/randombytes.h b/Blastproof/initfsgen/randombytes.h new file mode 100644 index 0000000..671c1b1 --- /dev/null +++ b/Blastproof/initfsgen/randombytes.h @@ -0,0 +1,6 @@ +#ifndef SPX_RANDOMBYTES_H +#define SPX_RANDOMBYTES_H + +extern void randombytes(unsigned char * x,unsigned long long xlen); + +#endif diff --git a/Blastproof/initfsgen/sha2.c b/Blastproof/initfsgen/sha2.c new file mode 100644 index 0000000..ef73047 --- /dev/null +++ b/Blastproof/initfsgen/sha2.c @@ -0,0 +1,700 @@ +/* Based on the public domain implementation in + * crypto_hash/sha512/ref/ from http://bench.cr.yp.to/supercop.html + * by D. J. 
Bernstein */ + +#include +#include +#include + +#include "utils.h" +#include "sha2.h" + +static uint32_t load_bigendian_32(const uint8_t *x) { + return (uint32_t)(x[3]) | (((uint32_t)(x[2])) << 8) | + (((uint32_t)(x[1])) << 16) | (((uint32_t)(x[0])) << 24); +} + +static uint64_t load_bigendian_64(const uint8_t *x) { + return (uint64_t)(x[7]) | (((uint64_t)(x[6])) << 8) | + (((uint64_t)(x[5])) << 16) | (((uint64_t)(x[4])) << 24) | + (((uint64_t)(x[3])) << 32) | (((uint64_t)(x[2])) << 40) | + (((uint64_t)(x[1])) << 48) | (((uint64_t)(x[0])) << 56); +} + +static void store_bigendian_32(uint8_t *x, uint64_t u) { + x[3] = (uint8_t) u; + u >>= 8; + x[2] = (uint8_t) u; + u >>= 8; + x[1] = (uint8_t) u; + u >>= 8; + x[0] = (uint8_t) u; +} + +static void store_bigendian_64(uint8_t *x, uint64_t u) { + x[7] = (uint8_t) u; + u >>= 8; + x[6] = (uint8_t) u; + u >>= 8; + x[5] = (uint8_t) u; + u >>= 8; + x[4] = (uint8_t) u; + u >>= 8; + x[3] = (uint8_t) u; + u >>= 8; + x[2] = (uint8_t) u; + u >>= 8; + x[1] = (uint8_t) u; + u >>= 8; + x[0] = (uint8_t) u; +} + +#define SHR(x, c) ((x) >> (c)) +#define ROTR_32(x, c) (((x) >> (c)) | ((x) << (32 - (c)))) +#define ROTR_64(x,c) (((x) >> (c)) | ((x) << (64 - (c)))) + +#define Ch(x, y, z) (((x) & (y)) ^ (~(x) & (z))) +#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) + +#define Sigma0_32(x) (ROTR_32(x, 2) ^ ROTR_32(x,13) ^ ROTR_32(x,22)) +#define Sigma1_32(x) (ROTR_32(x, 6) ^ ROTR_32(x,11) ^ ROTR_32(x,25)) +#define sigma0_32(x) (ROTR_32(x, 7) ^ ROTR_32(x,18) ^ SHR(x, 3)) +#define sigma1_32(x) (ROTR_32(x,17) ^ ROTR_32(x,19) ^ SHR(x,10)) + +#define Sigma0_64(x) (ROTR_64(x,28) ^ ROTR_64(x,34) ^ ROTR_64(x,39)) +#define Sigma1_64(x) (ROTR_64(x,14) ^ ROTR_64(x,18) ^ ROTR_64(x,41)) +#define sigma0_64(x) (ROTR_64(x, 1) ^ ROTR_64(x, 8) ^ SHR(x,7)) +#define sigma1_64(x) (ROTR_64(x,19) ^ ROTR_64(x,61) ^ SHR(x,6)) + +#define M_32(w0, w14, w9, w1) w0 = sigma1_32(w14) + (w9) + sigma0_32(w1) + (w0); +#define M_64(w0, w14, w9, w1) w0 = 
sigma1_64(w14) + (w9) + sigma0_64(w1) + (w0); + +#define EXPAND_32 \ + M_32(w0, w14, w9, w1) \ + M_32(w1, w15, w10, w2) \ + M_32(w2, w0, w11, w3) \ + M_32(w3, w1, w12, w4) \ + M_32(w4, w2, w13, w5) \ + M_32(w5, w3, w14, w6) \ + M_32(w6, w4, w15, w7) \ + M_32(w7, w5, w0, w8) \ + M_32(w8, w6, w1, w9) \ + M_32(w9, w7, w2, w10) \ + M_32(w10, w8, w3, w11) \ + M_32(w11, w9, w4, w12) \ + M_32(w12, w10, w5, w13) \ + M_32(w13, w11, w6, w14) \ + M_32(w14, w12, w7, w15) \ + M_32(w15, w13, w8, w0) + +#define EXPAND_64 \ + M_64(w0 ,w14,w9 ,w1 ) \ + M_64(w1 ,w15,w10,w2 ) \ + M_64(w2 ,w0 ,w11,w3 ) \ + M_64(w3 ,w1 ,w12,w4 ) \ + M_64(w4 ,w2 ,w13,w5 ) \ + M_64(w5 ,w3 ,w14,w6 ) \ + M_64(w6 ,w4 ,w15,w7 ) \ + M_64(w7 ,w5 ,w0 ,w8 ) \ + M_64(w8 ,w6 ,w1 ,w9 ) \ + M_64(w9 ,w7 ,w2 ,w10) \ + M_64(w10,w8 ,w3 ,w11) \ + M_64(w11,w9 ,w4 ,w12) \ + M_64(w12,w10,w5 ,w13) \ + M_64(w13,w11,w6 ,w14) \ + M_64(w14,w12,w7 ,w15) \ + M_64(w15,w13,w8 ,w0 ) + +#define F_32(w, k) \ + T1 = h + Sigma1_32(e) + Ch(e, f, g) + (k) + (w); \ + T2 = Sigma0_32(a) + Maj(a, b, c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ + a = T1 + T2; + +#define F_64(w,k) \ + T1 = h + Sigma1_64(e) + Ch(e,f,g) + k + w; \ + T2 = Sigma0_64(a) + Maj(a,b,c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ + a = T1 + T2; + +static size_t crypto_hashblocks_sha256(uint8_t *statebytes, + const uint8_t *in, size_t inlen) { + uint32_t state[8]; + uint32_t a; + uint32_t b; + uint32_t c; + uint32_t d; + uint32_t e; + uint32_t f; + uint32_t g; + uint32_t h; + uint32_t T1; + uint32_t T2; + + a = load_bigendian_32(statebytes + 0); + state[0] = a; + b = load_bigendian_32(statebytes + 4); + state[1] = b; + c = load_bigendian_32(statebytes + 8); + state[2] = c; + d = load_bigendian_32(statebytes + 12); + state[3] = d; + e = load_bigendian_32(statebytes + 16); + state[4] = e; + f = load_bigendian_32(statebytes + 20); + state[5] = f; + g = load_bigendian_32(statebytes + 24); + 
state[6] = g; + h = load_bigendian_32(statebytes + 28); + state[7] = h; + + while (inlen >= 64) { + uint32_t w0 = load_bigendian_32(in + 0); + uint32_t w1 = load_bigendian_32(in + 4); + uint32_t w2 = load_bigendian_32(in + 8); + uint32_t w3 = load_bigendian_32(in + 12); + uint32_t w4 = load_bigendian_32(in + 16); + uint32_t w5 = load_bigendian_32(in + 20); + uint32_t w6 = load_bigendian_32(in + 24); + uint32_t w7 = load_bigendian_32(in + 28); + uint32_t w8 = load_bigendian_32(in + 32); + uint32_t w9 = load_bigendian_32(in + 36); + uint32_t w10 = load_bigendian_32(in + 40); + uint32_t w11 = load_bigendian_32(in + 44); + uint32_t w12 = load_bigendian_32(in + 48); + uint32_t w13 = load_bigendian_32(in + 52); + uint32_t w14 = load_bigendian_32(in + 56); + uint32_t w15 = load_bigendian_32(in + 60); + + F_32(w0, 0x428a2f98) + F_32(w1, 0x71374491) + F_32(w2, 0xb5c0fbcf) + F_32(w3, 0xe9b5dba5) + F_32(w4, 0x3956c25b) + F_32(w5, 0x59f111f1) + F_32(w6, 0x923f82a4) + F_32(w7, 0xab1c5ed5) + F_32(w8, 0xd807aa98) + F_32(w9, 0x12835b01) + F_32(w10, 0x243185be) + F_32(w11, 0x550c7dc3) + F_32(w12, 0x72be5d74) + F_32(w13, 0x80deb1fe) + F_32(w14, 0x9bdc06a7) + F_32(w15, 0xc19bf174) + + EXPAND_32 + + F_32(w0, 0xe49b69c1) + F_32(w1, 0xefbe4786) + F_32(w2, 0x0fc19dc6) + F_32(w3, 0x240ca1cc) + F_32(w4, 0x2de92c6f) + F_32(w5, 0x4a7484aa) + F_32(w6, 0x5cb0a9dc) + F_32(w7, 0x76f988da) + F_32(w8, 0x983e5152) + F_32(w9, 0xa831c66d) + F_32(w10, 0xb00327c8) + F_32(w11, 0xbf597fc7) + F_32(w12, 0xc6e00bf3) + F_32(w13, 0xd5a79147) + F_32(w14, 0x06ca6351) + F_32(w15, 0x14292967) + + EXPAND_32 + + F_32(w0, 0x27b70a85) + F_32(w1, 0x2e1b2138) + F_32(w2, 0x4d2c6dfc) + F_32(w3, 0x53380d13) + F_32(w4, 0x650a7354) + F_32(w5, 0x766a0abb) + F_32(w6, 0x81c2c92e) + F_32(w7, 0x92722c85) + F_32(w8, 0xa2bfe8a1) + F_32(w9, 0xa81a664b) + F_32(w10, 0xc24b8b70) + F_32(w11, 0xc76c51a3) + F_32(w12, 0xd192e819) + F_32(w13, 0xd6990624) + F_32(w14, 0xf40e3585) + F_32(w15, 0x106aa070) + + EXPAND_32 + + F_32(w0, 0x19a4c116) 
+ F_32(w1, 0x1e376c08) + F_32(w2, 0x2748774c) + F_32(w3, 0x34b0bcb5) + F_32(w4, 0x391c0cb3) + F_32(w5, 0x4ed8aa4a) + F_32(w6, 0x5b9cca4f) + F_32(w7, 0x682e6ff3) + F_32(w8, 0x748f82ee) + F_32(w9, 0x78a5636f) + F_32(w10, 0x84c87814) + F_32(w11, 0x8cc70208) + F_32(w12, 0x90befffa) + F_32(w13, 0xa4506ceb) + F_32(w14, 0xbef9a3f7) + F_32(w15, 0xc67178f2) + + a += state[0]; + b += state[1]; + c += state[2]; + d += state[3]; + e += state[4]; + f += state[5]; + g += state[6]; + h += state[7]; + + state[0] = a; + state[1] = b; + state[2] = c; + state[3] = d; + state[4] = e; + state[5] = f; + state[6] = g; + state[7] = h; + + in += 64; + inlen -= 64; + } + + store_bigendian_32(statebytes + 0, state[0]); + store_bigendian_32(statebytes + 4, state[1]); + store_bigendian_32(statebytes + 8, state[2]); + store_bigendian_32(statebytes + 12, state[3]); + store_bigendian_32(statebytes + 16, state[4]); + store_bigendian_32(statebytes + 20, state[5]); + store_bigendian_32(statebytes + 24, state[6]); + store_bigendian_32(statebytes + 28, state[7]); + + return inlen; +} + +static int crypto_hashblocks_sha512(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) +{ + uint64_t state[8]; + uint64_t a; + uint64_t b; + uint64_t c; + uint64_t d; + uint64_t e; + uint64_t f; + uint64_t g; + uint64_t h; + uint64_t T1; + uint64_t T2; + + a = load_bigendian_64(statebytes + 0); state[0] = a; + b = load_bigendian_64(statebytes + 8); state[1] = b; + c = load_bigendian_64(statebytes + 16); state[2] = c; + d = load_bigendian_64(statebytes + 24); state[3] = d; + e = load_bigendian_64(statebytes + 32); state[4] = e; + f = load_bigendian_64(statebytes + 40); state[5] = f; + g = load_bigendian_64(statebytes + 48); state[6] = g; + h = load_bigendian_64(statebytes + 56); state[7] = h; + + while (inlen >= 128) { + uint64_t w0 = load_bigendian_64(in + 0); + uint64_t w1 = load_bigendian_64(in + 8); + uint64_t w2 = load_bigendian_64(in + 16); + uint64_t w3 = load_bigendian_64(in + 24); + 
uint64_t w4 = load_bigendian_64(in + 32); + uint64_t w5 = load_bigendian_64(in + 40); + uint64_t w6 = load_bigendian_64(in + 48); + uint64_t w7 = load_bigendian_64(in + 56); + uint64_t w8 = load_bigendian_64(in + 64); + uint64_t w9 = load_bigendian_64(in + 72); + uint64_t w10 = load_bigendian_64(in + 80); + uint64_t w11 = load_bigendian_64(in + 88); + uint64_t w12 = load_bigendian_64(in + 96); + uint64_t w13 = load_bigendian_64(in + 104); + uint64_t w14 = load_bigendian_64(in + 112); + uint64_t w15 = load_bigendian_64(in + 120); + + F_64(w0 ,0x428a2f98d728ae22ULL) + F_64(w1 ,0x7137449123ef65cdULL) + F_64(w2 ,0xb5c0fbcfec4d3b2fULL) + F_64(w3 ,0xe9b5dba58189dbbcULL) + F_64(w4 ,0x3956c25bf348b538ULL) + F_64(w5 ,0x59f111f1b605d019ULL) + F_64(w6 ,0x923f82a4af194f9bULL) + F_64(w7 ,0xab1c5ed5da6d8118ULL) + F_64(w8 ,0xd807aa98a3030242ULL) + F_64(w9 ,0x12835b0145706fbeULL) + F_64(w10,0x243185be4ee4b28cULL) + F_64(w11,0x550c7dc3d5ffb4e2ULL) + F_64(w12,0x72be5d74f27b896fULL) + F_64(w13,0x80deb1fe3b1696b1ULL) + F_64(w14,0x9bdc06a725c71235ULL) + F_64(w15,0xc19bf174cf692694ULL) + + EXPAND_64 + + F_64(w0 ,0xe49b69c19ef14ad2ULL) + F_64(w1 ,0xefbe4786384f25e3ULL) + F_64(w2 ,0x0fc19dc68b8cd5b5ULL) + F_64(w3 ,0x240ca1cc77ac9c65ULL) + F_64(w4 ,0x2de92c6f592b0275ULL) + F_64(w5 ,0x4a7484aa6ea6e483ULL) + F_64(w6 ,0x5cb0a9dcbd41fbd4ULL) + F_64(w7 ,0x76f988da831153b5ULL) + F_64(w8 ,0x983e5152ee66dfabULL) + F_64(w9 ,0xa831c66d2db43210ULL) + F_64(w10,0xb00327c898fb213fULL) + F_64(w11,0xbf597fc7beef0ee4ULL) + F_64(w12,0xc6e00bf33da88fc2ULL) + F_64(w13,0xd5a79147930aa725ULL) + F_64(w14,0x06ca6351e003826fULL) + F_64(w15,0x142929670a0e6e70ULL) + + EXPAND_64 + + F_64(w0 ,0x27b70a8546d22ffcULL) + F_64(w1 ,0x2e1b21385c26c926ULL) + F_64(w2 ,0x4d2c6dfc5ac42aedULL) + F_64(w3 ,0x53380d139d95b3dfULL) + F_64(w4 ,0x650a73548baf63deULL) + F_64(w5 ,0x766a0abb3c77b2a8ULL) + F_64(w6 ,0x81c2c92e47edaee6ULL) + F_64(w7 ,0x92722c851482353bULL) + F_64(w8 ,0xa2bfe8a14cf10364ULL) + F_64(w9 ,0xa81a664bbc423001ULL) + 
F_64(w10,0xc24b8b70d0f89791ULL) + F_64(w11,0xc76c51a30654be30ULL) + F_64(w12,0xd192e819d6ef5218ULL) + F_64(w13,0xd69906245565a910ULL) + F_64(w14,0xf40e35855771202aULL) + F_64(w15,0x106aa07032bbd1b8ULL) + + EXPAND_64 + + F_64(w0 ,0x19a4c116b8d2d0c8ULL) + F_64(w1 ,0x1e376c085141ab53ULL) + F_64(w2 ,0x2748774cdf8eeb99ULL) + F_64(w3 ,0x34b0bcb5e19b48a8ULL) + F_64(w4 ,0x391c0cb3c5c95a63ULL) + F_64(w5 ,0x4ed8aa4ae3418acbULL) + F_64(w6 ,0x5b9cca4f7763e373ULL) + F_64(w7 ,0x682e6ff3d6b2b8a3ULL) + F_64(w8 ,0x748f82ee5defb2fcULL) + F_64(w9 ,0x78a5636f43172f60ULL) + F_64(w10,0x84c87814a1f0ab72ULL) + F_64(w11,0x8cc702081a6439ecULL) + F_64(w12,0x90befffa23631e28ULL) + F_64(w13,0xa4506cebde82bde9ULL) + F_64(w14,0xbef9a3f7b2c67915ULL) + F_64(w15,0xc67178f2e372532bULL) + + EXPAND_64 + + F_64(w0 ,0xca273eceea26619cULL) + F_64(w1 ,0xd186b8c721c0c207ULL) + F_64(w2 ,0xeada7dd6cde0eb1eULL) + F_64(w3 ,0xf57d4f7fee6ed178ULL) + F_64(w4 ,0x06f067aa72176fbaULL) + F_64(w5 ,0x0a637dc5a2c898a6ULL) + F_64(w6 ,0x113f9804bef90daeULL) + F_64(w7 ,0x1b710b35131c471bULL) + F_64(w8 ,0x28db77f523047d84ULL) + F_64(w9 ,0x32caab7b40c72493ULL) + F_64(w10,0x3c9ebe0a15c9bebcULL) + F_64(w11,0x431d67c49c100d4cULL) + F_64(w12,0x4cc5d4becb3e42b6ULL) + F_64(w13,0x597f299cfc657e2aULL) + F_64(w14,0x5fcb6fab3ad6faecULL) + F_64(w15,0x6c44198c4a475817ULL) + + a += state[0]; + b += state[1]; + c += state[2]; + d += state[3]; + e += state[4]; + f += state[5]; + g += state[6]; + h += state[7]; + + state[0] = a; + state[1] = b; + state[2] = c; + state[3] = d; + state[4] = e; + state[5] = f; + state[6] = g; + state[7] = h; + + in += 128; + inlen -= 128; + } + + store_bigendian_64(statebytes + 0,state[0]); + store_bigendian_64(statebytes + 8,state[1]); + store_bigendian_64(statebytes + 16,state[2]); + store_bigendian_64(statebytes + 24,state[3]); + store_bigendian_64(statebytes + 32,state[4]); + store_bigendian_64(statebytes + 40,state[5]); + store_bigendian_64(statebytes + 48,state[6]); + store_bigendian_64(statebytes + 
56,state[7]); + + return inlen; +} + + +static const uint8_t iv_256[32] = { + 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, + 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, + 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, + 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19 +}; + +static const uint8_t iv_512[64] = { + 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, + 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, + 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, + 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, + 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, + 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 +}; + +void sha256_inc_init(uint8_t *state) { + for (size_t i = 0; i < 32; ++i) { + state[i] = iv_256[i]; + } + for (size_t i = 32; i < 40; ++i) { + state[i] = 0; + } +} + +void sha512_inc_init(uint8_t *state) { + for (size_t i = 0; i < 64; ++i) { + state[i] = iv_512[i]; + } + for (size_t i = 64; i < 72; ++i) { + state[i] = 0; + } +} + +void sha256_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks) { + uint64_t bytes = load_bigendian_64(state + 32); + + crypto_hashblocks_sha256(state, in, 64 * inblocks); + bytes += 64 * inblocks; + + store_bigendian_64(state + 32, bytes); +} + +void sha512_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks) { + uint64_t bytes = load_bigendian_64(state + 64); + + crypto_hashblocks_sha512(state, in, 128 * inblocks); + bytes += 128 * inblocks; + + store_bigendian_64(state + 64, bytes); +} + +void sha256_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen) { + uint8_t padded[128]; + uint64_t bytes = load_bigendian_64(state + 32) + inlen; + + crypto_hashblocks_sha256(state, in, inlen); + in += inlen; + inlen &= 63; + in -= inlen; + + for (size_t i = 0; i < inlen; ++i) { + padded[i] = in[i]; + } + padded[inlen] = 0x80; + + if (inlen < 56) { + for (size_t i = inlen + 1; i < 56; ++i) { + 
padded[i] = 0; + } + padded[56] = (uint8_t) (bytes >> 53); + padded[57] = (uint8_t) (bytes >> 45); + padded[58] = (uint8_t) (bytes >> 37); + padded[59] = (uint8_t) (bytes >> 29); + padded[60] = (uint8_t) (bytes >> 21); + padded[61] = (uint8_t) (bytes >> 13); + padded[62] = (uint8_t) (bytes >> 5); + padded[63] = (uint8_t) (bytes << 3); + crypto_hashblocks_sha256(state, padded, 64); + } else { + for (size_t i = inlen + 1; i < 120; ++i) { + padded[i] = 0; + } + padded[120] = (uint8_t) (bytes >> 53); + padded[121] = (uint8_t) (bytes >> 45); + padded[122] = (uint8_t) (bytes >> 37); + padded[123] = (uint8_t) (bytes >> 29); + padded[124] = (uint8_t) (bytes >> 21); + padded[125] = (uint8_t) (bytes >> 13); + padded[126] = (uint8_t) (bytes >> 5); + padded[127] = (uint8_t) (bytes << 3); + crypto_hashblocks_sha256(state, padded, 128); + } + + for (size_t i = 0; i < 32; ++i) { + out[i] = state[i]; + } + +} + +void sha512_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen) { + uint8_t padded[256]; + uint64_t bytes = load_bigendian_64(state + 64) + inlen; + + crypto_hashblocks_sha512(state, in, inlen); + in += inlen; + inlen &= 127; + in -= inlen; + + for (size_t i = 0; i < inlen; ++i) { + padded[i] = in[i]; + } + padded[inlen] = 0x80; + + if (inlen < 112) { + for (size_t i = inlen + 1; i < 119; ++i) { + padded[i] = 0; + } + padded[119] = (uint8_t) (bytes >> 61); + padded[120] = (uint8_t) (bytes >> 53); + padded[121] = (uint8_t) (bytes >> 45); + padded[122] = (uint8_t) (bytes >> 37); + padded[123] = (uint8_t) (bytes >> 29); + padded[124] = (uint8_t) (bytes >> 21); + padded[125] = (uint8_t) (bytes >> 13); + padded[126] = (uint8_t) (bytes >> 5); + padded[127] = (uint8_t) (bytes << 3); + crypto_hashblocks_sha512(state, padded, 128); + } else { + for (size_t i = inlen + 1; i < 247; ++i) { + padded[i] = 0; + } + padded[247] = (uint8_t) (bytes >> 61); + padded[248] = (uint8_t) (bytes >> 53); + padded[249] = (uint8_t) (bytes >> 45); + padded[250] = (uint8_t) 
(bytes >> 37); + padded[251] = (uint8_t) (bytes >> 29); + padded[252] = (uint8_t) (bytes >> 21); + padded[253] = (uint8_t) (bytes >> 13); + padded[254] = (uint8_t) (bytes >> 5); + padded[255] = (uint8_t) (bytes << 3); + crypto_hashblocks_sha512(state, padded, 256); + } + + for (size_t i = 0; i < 64; ++i) { + out[i] = state[i]; + } +} + +void sha256(uint8_t *out, const uint8_t *in, size_t inlen) { + uint8_t state[40]; + + sha256_inc_init(state); + sha256_inc_finalize(out, state, in, inlen); +} + +void sha512(uint8_t *out, const uint8_t *in, size_t inlen) { + uint8_t state[72]; + + sha512_inc_init(state); + sha512_inc_finalize(out, state, in, inlen); +} + +/** + * mgf1 function based on the SHA-256 hash function + * Note that inlen should be sufficiently small that it still allows for + * an array to be allocated on the stack. Typically 'in' is merely a seed. + * Outputs outlen number of bytes + */ +void mgf1_256(unsigned char *out, unsigned long outlen, + const unsigned char *in, unsigned long inlen) +{ + SPX_VLA(uint8_t, inbuf, inlen+4); + unsigned char outbuf[SPX_SHA256_OUTPUT_BYTES]; + unsigned long i; + + memcpy(inbuf, in, inlen); + + /* While we can fit in at least another full block of SHA256 output.. */ + for (i = 0; (i+1)*SPX_SHA256_OUTPUT_BYTES <= outlen; i++) { + u32_to_bytes(inbuf + inlen, i); + sha256(out, inbuf, inlen + 4); + out += SPX_SHA256_OUTPUT_BYTES; + } + /* Until we cannot anymore, and we fill the remainder. 
*/ + if (outlen > i*SPX_SHA256_OUTPUT_BYTES) { + u32_to_bytes(inbuf + inlen, i); + sha256(outbuf, inbuf, inlen + 4); + memcpy(out, outbuf, outlen - i*SPX_SHA256_OUTPUT_BYTES); + } +} + +/* + * mgf1 function based on the SHA-512 hash function + */ +void mgf1_512(unsigned char *out, unsigned long outlen, + const unsigned char *in, unsigned long inlen) +{ + SPX_VLA(uint8_t, inbuf, inlen+4); + unsigned char outbuf[SPX_SHA512_OUTPUT_BYTES]; + unsigned long i; + + memcpy(inbuf, in, inlen); + + /* While we can fit in at least another full block of SHA512 output.. */ + for (i = 0; (i+1)*SPX_SHA512_OUTPUT_BYTES <= outlen; i++) { + u32_to_bytes(inbuf + inlen, i); + sha512(out, inbuf, inlen + 4); + out += SPX_SHA512_OUTPUT_BYTES; + } + /* Until we cannot anymore, and we fill the remainder. */ + if (outlen > i*SPX_SHA512_OUTPUT_BYTES) { + u32_to_bytes(inbuf + inlen, i); + sha512(outbuf, inbuf, inlen + 4); + memcpy(out, outbuf, outlen - i*SPX_SHA512_OUTPUT_BYTES); + } +} + + +/** + * Absorb the constant pub_seed using one round of the compression function + * This initializes state_seeded and state_seeded_512, which can then be + * reused in thash + **/ +void seed_state(spx_ctx *ctx) { + uint8_t block[SPX_SHA512_BLOCK_BYTES]; + size_t i; + + for (i = 0; i < SPX_N; ++i) { + block[i] = ctx->pub_seed[i]; + } + for (i = SPX_N; i < SPX_SHA512_BLOCK_BYTES; ++i) { + block[i] = 0; + } + /* block has been properly initialized for both SHA-256 and SHA-512 */ + + sha256_inc_init(ctx->state_seeded); + sha256_inc_blocks(ctx->state_seeded, block, 1); +#if SPX_SHA512 + sha512_inc_init(ctx->state_seeded_512); + sha512_inc_blocks(ctx->state_seeded_512, block, 1); +#endif +} diff --git a/Blastproof/initfsgen/sha2.h b/Blastproof/initfsgen/sha2.h new file mode 100644 index 0000000..732ab4b --- /dev/null +++ b/Blastproof/initfsgen/sha2.h @@ -0,0 +1,43 @@ +#ifndef SPX_SHA2_H +#define SPX_SHA2_H + +#include "params.h" + +#define SPX_SHA256_BLOCK_BYTES 64 +#define SPX_SHA256_OUTPUT_BYTES 32 /* This 
does not necessarily equal SPX_N */ + +#define SPX_SHA512_BLOCK_BYTES 128 +#define SPX_SHA512_OUTPUT_BYTES 64 + +#if SPX_SHA256_OUTPUT_BYTES < SPX_N + #error Linking against SHA-256 with N larger than 32 bytes is not supported +#endif + +#define SPX_SHA256_ADDR_BYTES 22 + +#include +#include + +void sha256_inc_init(uint8_t *state); +void sha256_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks); +void sha256_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen); +void sha256(uint8_t *out, const uint8_t *in, size_t inlen); + +void sha512_inc_init(uint8_t *state); +void sha512_inc_blocks(uint8_t *state, const uint8_t *in, size_t inblocks); +void sha512_inc_finalize(uint8_t *out, uint8_t *state, const uint8_t *in, size_t inlen); +void sha512(uint8_t *out, const uint8_t *in, size_t inlen); + +#define mgf1_256 SPX_NAMESPACE(mgf1_256) +void mgf1_256(unsigned char *out, unsigned long outlen, + const unsigned char *in, unsigned long inlen); + +#define mgf1_512 SPX_NAMESPACE(mgf1_512) +void mgf1_512(unsigned char *out, unsigned long outlen, + const unsigned char *in, unsigned long inlen); + +#define seed_state SPX_NAMESPACE(seed_state) +void seed_state(spx_ctx *ctx); + + +#endif diff --git a/Blastproof/initfsgen/sha2_offsets.h b/Blastproof/initfsgen/sha2_offsets.h new file mode 100644 index 0000000..49f7e85 --- /dev/null +++ b/Blastproof/initfsgen/sha2_offsets.h @@ -0,0 +1,20 @@ +#ifndef SHA2_OFFSETS_H_ +#define SHA2_OFFSETS_H_ + +/* + * Offsets of various fields in the address structure when we use SHA2 as + * the Sphincs+ hash function + */ + +#define SPX_OFFSET_LAYER 0 /* The byte used to specify the Merkle tree layer */ +#define SPX_OFFSET_TREE 1 /* The start of the 8 byte field used to specify the tree */ +#define SPX_OFFSET_TYPE 9 /* The byte used to specify the hash type (reason) */ +#define SPX_OFFSET_KP_ADDR 10 /* The start of the 4 byte field used to specify the key pair address */ +#define SPX_OFFSET_CHAIN_ADDR 17 /* The byte 
used to specify the chain address (which Winternitz chain) */ +#define SPX_OFFSET_HASH_ADDR 21 /* The byte used to specify the hash address (where in the Winternitz chain) */ +#define SPX_OFFSET_TREE_HGT 17 /* The byte used to specify the height of this node in the FORS or Merkle tree */ +#define SPX_OFFSET_TREE_INDEX 18 /* The start of the 4 byte field used to specify the node in the FORS or Merkle tree */ + +#define SPX_SHA2 1 + +#endif /* SHA2_OFFSETS_H_ */ diff --git a/Blastproof/initfsgen/sha3.c b/Blastproof/initfsgen/sha3.c new file mode 100644 index 0000000..fb4ef05 --- /dev/null +++ b/Blastproof/initfsgen/sha3.c @@ -0,0 +1,190 @@ +// sha3.c +// 19-Nov-11 Markku-Juhani O. Saarinen + +// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3" +// Revised 03-Sep-15 for portability + OpenSSL - style API + +#include "sha3.h" + +// update the state with given number of rounds + +void sha3_keccakf(uint64_t st[25]) +{ + // constants + const uint64_t keccakf_rndc[24] = { + 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, + 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, + 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, + 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, + 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, + 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, + 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, + 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 + }; + const int keccakf_rotc[24] = { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 + }; + const int keccakf_piln[24] = { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 + }; + + // variables + int i, j, r; + uint64_t t, bc[5]; + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + uint8_t *v; + + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *) &st[i]; + st[i] = ((uint64_t) v[0]) | (((uint64_t) v[1]) << 8) | + (((uint64_t) v[2]) << 16) | (((uint64_t) v[3]) << 24) | + (((uint64_t) v[4]) << 32) | (((uint64_t) v[5]) << 40) | + (((uint64_t) v[6]) << 48) | (((uint64_t) v[7]) << 56); + } +#endif + + // actual iteration + for (r = 0; r < KECCAKF_ROUNDS; r++) { + + // Theta + for (i = 0; i < 5; i++) + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + st[j + i] ^= t; + } + + // Rho Pi + t = st[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = st[j]; + st[j] = ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + // Chi + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = st[j + i]; + for (i = 0; i < 5; i++) + st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5]; + } + + // Iota + st[0] ^= keccakf_rndc[r]; + } + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *) &st[i]; + t = st[i]; + v[0] = t & 0xFF; + v[1] = (t >> 8) & 0xFF; + v[2] = (t >> 16) & 0xFF; + v[3] = (t >> 24) & 0xFF; + v[4] = (t >> 32) & 0xFF; + v[5] = (t >> 40) & 0xFF; + v[6] = (t >> 48) & 0xFF; + v[7] = (t >> 56) & 0xFF; + } +#endif +} + +// Initialize the context for SHA3 + +int sha3_init(sha3_ctx_t *c, int mdlen) +{ + int i; + + for (i = 0; i < 25; i++) + c->st.q[i] = 0; + c->mdlen = mdlen; + c->rsiz = 200 - 2 * mdlen; + c->pt = 0; + + return 1; +} + +// update state with more data + +int sha3_update(sha3_ctx_t *c, const void *data, size_t len) +{ + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + c->st.b[j++] ^= ((const uint8_t *) data)[i]; + if (j >= c->rsiz) { + sha3_keccakf(c->st.q); + j = 0; + } + } + c->pt = j; + + return 1; +} + +// finalize and output a hash + +int sha3_final(void *md, sha3_ctx_t *c) +{ + int i; + + c->st.b[c->pt] ^= 0x06; + c->st.b[c->rsiz - 1] ^= 0x80; + sha3_keccakf(c->st.q); + + for (i = 0; i < c->mdlen; i++) { + ((uint8_t *) md)[i] = c->st.b[i]; + } + + return 1; +} + +// compute a SHA-3 hash (md) of given byte length from "in" + +void *sha3(const void *in, size_t inlen, void *md, int mdlen) +{ + sha3_ctx_t sha3; + + sha3_init(&sha3, mdlen); + sha3_update(&sha3, in, inlen); + sha3_final(md, &sha3); + + return md; +} + +// SHAKE128 and SHAKE256 extensible-output functionality + +void shake_xof(sha3_ctx_t *c) +{ + c->st.b[c->pt] ^= 0x1F; + c->st.b[c->rsiz - 1] ^= 0x80; + sha3_keccakf(c->st.q); + c->pt = 0; +} + +void shake_out(sha3_ctx_t *c, void *out, size_t len) +{ + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + if (j >= c->rsiz) { + sha3_keccakf(c->st.q); + j = 0; + } + ((uint8_t *) out)[i] = c->st.b[j++]; + } + c->pt = j; +} diff --git a/Blastproof/initfsgen/sha3.h b/Blastproof/initfsgen/sha3.h new file mode 100644 index 0000000..4431c49 --- /dev/null +++ b/Blastproof/initfsgen/sha3.h @@ -0,0 +1,46 
@@ +// sha3.h +// 19-Nov-11 Markku-Juhani O. Saarinen + +#ifndef SHA3_H +#define SHA3_H + +#include +#include + +#ifndef KECCAKF_ROUNDS +#define KECCAKF_ROUNDS 24 +#endif + +#ifndef ROTL64 +#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) +#endif + +// state context +typedef struct { + union { // state: + uint8_t b[200]; // 8-bit bytes + uint64_t q[25]; // 64-bit words + } st; + int pt, rsiz, mdlen; // these don't overflow +} sha3_ctx_t; + +// Compression function. +void sha3_keccakf(uint64_t st[25]); + +// OpenSSL - like interfece +int sha3_init(sha3_ctx_t *c, int mdlen); // mdlen = hash output in bytes +int sha3_update(sha3_ctx_t *c, const void *data, size_t len); +int sha3_final(void *md, sha3_ctx_t *c); // digest goes to md + +// compute a sha3 hash (md) of given byte length from "in" +void *sha3(const void *in, size_t inlen, void *md, int mdlen); + +// SHAKE128 and SHAKE256 extensible-output functions +#define shake128_init(c) sha3_init(c, 16) +#define shake256_init(c) sha3_init(c, 32) +#define shake_update sha3_update + +void shake_xof(sha3_ctx_t *c); +void shake_out(sha3_ctx_t *c, void *out, size_t len); + +#endif diff --git a/Blastproof/initfsgen/sign.c b/Blastproof/initfsgen/sign.c new file mode 100644 index 0000000..a8e0c3c --- /dev/null +++ b/Blastproof/initfsgen/sign.c @@ -0,0 +1,287 @@ +#include +#include +#include + +#include "api.h" +#include "params.h" +#include "wots.h" +#include "fors.h" +#include "hash.h" +#include "thash.h" +#include "address.h" +#include "randombytes.h" +#include "utils.h" +#include "merkle.h" + +/* + * Returns the length of a secret key, in bytes + */ +unsigned long long crypto_sign_secretkeybytes(void) +{ + return CRYPTO_SECRETKEYBYTES; +} + +/* + * Returns the length of a public key, in bytes + */ +unsigned long long crypto_sign_publickeybytes(void) +{ + return CRYPTO_PUBLICKEYBYTES; +} + +/* + * Returns the length of a signature, in bytes + */ +unsigned long long crypto_sign_bytes(void) +{ + return CRYPTO_BYTES; +} 
+ +/* + * Returns the length of the seed required to generate a key pair, in bytes + */ +unsigned long long crypto_sign_seedbytes(void) +{ + return CRYPTO_SEEDBYTES; +} + +/* + * Generates an SPX key pair given a seed of length + * Format sk: [SK_SEED || SK_PRF || PUB_SEED || root] + * Format pk: [PUB_SEED || root] + */ +int crypto_sign_seed_keypair(unsigned char *pk, unsigned char *sk, + const unsigned char *seed) +{ + spx_ctx ctx; + + /* Initialize SK_SEED, SK_PRF and PUB_SEED from seed. */ + memcpy(sk, seed, CRYPTO_SEEDBYTES); + + memcpy(pk, sk + 2*SPX_N, SPX_N); + + memcpy(ctx.pub_seed, pk, SPX_N); + memcpy(ctx.sk_seed, sk, SPX_N); + + /* This hook allows the hash function instantiation to do whatever + preparation or computation it needs, based on the public seed. */ + initialize_hash_function(&ctx); + + /* Compute root node of the top-most subtree. */ + merkle_gen_root(sk + 3*SPX_N, &ctx); + + memcpy(pk + SPX_N, sk + 3*SPX_N, SPX_N); + + return 0; +} + +/* + * Generates an SPX key pair. + * Format sk: [SK_SEED || SK_PRF || PUB_SEED || root] + * Format pk: [PUB_SEED || root] + */ +int crypto_sign_keypair(unsigned char *pk, unsigned char *sk) +{ + unsigned char seed[CRYPTO_SEEDBYTES]; + randombytes(seed, CRYPTO_SEEDBYTES); + crypto_sign_seed_keypair(pk, sk, seed); + + return 0; +} + +/** + * Returns an array containing a detached signature. 
+ */ +int crypto_sign_signature(uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk) +{ + spx_ctx ctx; + + const unsigned char *sk_prf = sk + SPX_N; + const unsigned char *pk = sk + 2*SPX_N; + + unsigned char optrand[SPX_N]; + unsigned char mhash[SPX_FORS_MSG_BYTES]; + unsigned char root[SPX_N]; + uint32_t i; + uint64_t tree; + uint32_t idx_leaf; + uint32_t wots_addr[8] = {0}; + uint32_t tree_addr[8] = {0}; + + memcpy(ctx.sk_seed, sk, SPX_N); + memcpy(ctx.pub_seed, pk, SPX_N); + + /* This hook allows the hash function instantiation to do whatever + preparation or computation it needs, based on the public seed. */ + initialize_hash_function(&ctx); + + set_type(wots_addr, SPX_ADDR_TYPE_WOTS); + set_type(tree_addr, SPX_ADDR_TYPE_HASHTREE); + + /* Optionally, signing can be made non-deterministic using optrand. + This can help counter side-channel attacks that would benefit from + getting a large number of traces when the signer uses the same nodes. */ + randombytes(optrand, SPX_N); + /* Compute the digest randomization value. */ + gen_message_random(sig, sk_prf, optrand, m, mlen, &ctx); + + /* Derive the message digest and leaf index from R, PK and M. */ + hash_message(mhash, &tree, &idx_leaf, sig, pk, m, mlen, &ctx); + sig += SPX_N; + + set_tree_addr(wots_addr, tree); + set_keypair_addr(wots_addr, idx_leaf); + + /* Sign the message hash using FORS. */ + fors_sign(sig, root, mhash, &ctx, wots_addr); + sig += SPX_FORS_BYTES; + + for (i = 0; i < SPX_D; i++) { + set_layer_addr(tree_addr, i); + set_tree_addr(tree_addr, tree); + + copy_subtree_addr(wots_addr, tree_addr); + set_keypair_addr(wots_addr, idx_leaf); + + merkle_sign(sig, root, &ctx, wots_addr, tree_addr, idx_leaf); + sig += SPX_WOTS_BYTES + SPX_TREE_HEIGHT * SPX_N; + + /* Update the indices for the next layer. 
*/ + idx_leaf = (tree & ((1 << SPX_TREE_HEIGHT)-1)); + tree = tree >> SPX_TREE_HEIGHT; + } + + *siglen = SPX_BYTES; + + return 0; +} + +/** + * Verifies a detached signature and message under a given public key. + */ +int crypto_sign_verify(const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk) +{ + spx_ctx ctx; + const unsigned char *pub_root = pk + SPX_N; + unsigned char mhash[SPX_FORS_MSG_BYTES]; + unsigned char wots_pk[SPX_WOTS_BYTES]; + unsigned char root[SPX_N]; + unsigned char leaf[SPX_N]; + unsigned int i; + uint64_t tree; + uint32_t idx_leaf; + uint32_t wots_addr[8] = {0}; + uint32_t tree_addr[8] = {0}; + uint32_t wots_pk_addr[8] = {0}; + + if (siglen != SPX_BYTES) { + return -1; + } + + memcpy(ctx.pub_seed, pk, SPX_N); + + /* This hook allows the hash function instantiation to do whatever + preparation or computation it needs, based on the public seed. */ + initialize_hash_function(&ctx); + + set_type(wots_addr, SPX_ADDR_TYPE_WOTS); + set_type(tree_addr, SPX_ADDR_TYPE_HASHTREE); + set_type(wots_pk_addr, SPX_ADDR_TYPE_WOTSPK); + + /* Derive the message digest and leaf index from R || PK || M. */ + /* The additional SPX_N is a result of the hash domain separator. */ + hash_message(mhash, &tree, &idx_leaf, sig, pk, m, mlen, &ctx); + sig += SPX_N; + + /* Layer correctly defaults to 0, so no need to set_layer_addr */ + set_tree_addr(wots_addr, tree); + set_keypair_addr(wots_addr, idx_leaf); + + fors_pk_from_sig(root, sig, mhash, &ctx, wots_addr); + sig += SPX_FORS_BYTES; + + /* For each subtree.. */ + for (i = 0; i < SPX_D; i++) { + set_layer_addr(tree_addr, i); + set_tree_addr(tree_addr, tree); + + copy_subtree_addr(wots_addr, tree_addr); + set_keypair_addr(wots_addr, idx_leaf); + + copy_keypair_addr(wots_pk_addr, wots_addr); + + /* The WOTS public key is only correct if the signature was correct. 
*/ + /* Initially, root is the FORS pk, but on subsequent iterations it is + the root of the subtree below the currently processed subtree. */ + wots_pk_from_sig(wots_pk, sig, root, &ctx, wots_addr); + sig += SPX_WOTS_BYTES; + + /* Compute the leaf node using the WOTS public key. */ + thash(leaf, wots_pk, SPX_WOTS_LEN, &ctx, wots_pk_addr); + + /* Compute the root node of this subtree. */ + compute_root(root, leaf, idx_leaf, 0, sig, SPX_TREE_HEIGHT, + &ctx, tree_addr); + sig += SPX_TREE_HEIGHT * SPX_N; + + /* Update the indices for the next layer. */ + idx_leaf = (tree & ((1 << SPX_TREE_HEIGHT)-1)); + tree = tree >> SPX_TREE_HEIGHT; + } + + /* Check if the root node equals the root node in the public key. */ + if (memcmp(root, pub_root, SPX_N)) { + return -1; + } + + return 0; +} + + +/** + * Returns an array containing the signature followed by the message. + */ +int crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk) +{ + size_t siglen; + + crypto_sign_signature(sm, &siglen, m, (size_t)mlen, sk); + + memmove(sm + SPX_BYTES, m, mlen); + *smlen = siglen + mlen; + + return 0; +} + +/** + * Verifies a given signature-message pair under a given public key. + */ +int crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk) +{ + /* The API caller does not necessarily know what size a signature should be + but SPHINCS+ signatures are always exactly SPX_BYTES. */ + if (smlen < SPX_BYTES) { + memset(m, 0, smlen); + *mlen = 0; + return -1; + } + + *mlen = smlen - SPX_BYTES; + + if (crypto_sign_verify(sm, SPX_BYTES, sm + SPX_BYTES, (size_t)*mlen, pk)) { + memset(m, 0, smlen); + *mlen = 0; + return -1; + } + + /* If verification was successful, move the message to the right place. 
*/ + memmove(m, sm + SPX_BYTES, *mlen); + + return 0; +} diff --git a/Blastproof/initfsgen/thash.h b/Blastproof/initfsgen/thash.h new file mode 100644 index 0000000..8687ccf --- /dev/null +++ b/Blastproof/initfsgen/thash.h @@ -0,0 +1,13 @@ +#ifndef SPX_THASH_H +#define SPX_THASH_H + +#include "context.h" +#include "params.h" + +#include + +#define thash SPX_NAMESPACE(thash) +void thash(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]); + +#endif diff --git a/Blastproof/initfsgen/thash_sha2_robust.c b/Blastproof/initfsgen/thash_sha2_robust.c new file mode 100644 index 0000000..67ca3da --- /dev/null +++ b/Blastproof/initfsgen/thash_sha2_robust.c @@ -0,0 +1,74 @@ +#include +#include + +#include "thash.h" +#include "address.h" +#include "params.h" +#include "utils.h" +#include "sha2.h" + +#if SPX_SHA512 +static void thash_512(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]); +#endif + +/** + * Takes an array of inblocks concatenated arrays of SPX_N bytes. 
+ */ +void thash(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]) +{ +#if SPX_SHA512 + if (inblocks > 1) { + thash_512(out, in, inblocks, ctx, addr); + return; + } +#endif + unsigned char outbuf[SPX_SHA256_OUTPUT_BYTES]; + SPX_VLA(uint8_t, bitmask, inblocks * SPX_N); + SPX_VLA(uint8_t, buf, SPX_N + SPX_SHA256_OUTPUT_BYTES + inblocks*SPX_N); + uint8_t sha2_state[40]; + unsigned int i; + + memcpy(buf, ctx->pub_seed, SPX_N); + memcpy(buf + SPX_N, addr, SPX_SHA256_ADDR_BYTES); + mgf1_256(bitmask, inblocks * SPX_N, buf, SPX_N + SPX_SHA256_ADDR_BYTES); + + /* Retrieve precomputed state containing pub_seed */ + memcpy(sha2_state, ctx->state_seeded, 40 * sizeof(uint8_t)); + + for (i = 0; i < inblocks * SPX_N; i++) { + buf[SPX_N + SPX_SHA256_ADDR_BYTES + i] = in[i] ^ bitmask[i]; + } + + sha256_inc_finalize(outbuf, sha2_state, buf + SPX_N, + SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + memcpy(out, outbuf, SPX_N); +} + +#if SPX_SHA512 +static void thash_512(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]) +{ + unsigned char outbuf[SPX_SHA512_OUTPUT_BYTES]; + SPX_VLA(uint8_t, bitmask, inblocks * SPX_N); + SPX_VLA(uint8_t, buf, SPX_N + SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + uint8_t sha2_state[72]; + unsigned int i; + + memcpy(buf, ctx->pub_seed, SPX_N); + memcpy(buf + SPX_N, addr, SPX_SHA256_ADDR_BYTES); + mgf1_512(bitmask, inblocks * SPX_N, buf, SPX_N + SPX_SHA256_ADDR_BYTES); + + /* Retrieve precomputed state containing pub_seed */ + memcpy(sha2_state, ctx->state_seeded_512, 72 * sizeof(uint8_t)); + + for (i = 0; i < inblocks * SPX_N; i++) { + buf[SPX_N + SPX_SHA256_ADDR_BYTES + i] = in[i] ^ bitmask[i]; + } + + sha512_inc_finalize(outbuf, sha2_state, buf + SPX_N, + SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + memcpy(out, outbuf, SPX_N); +} +#endif diff --git a/Blastproof/initfsgen/thash_sha2_simple.c b/Blastproof/initfsgen/thash_sha2_simple.c new file mode 
100644 index 0000000..da58896 --- /dev/null +++ b/Blastproof/initfsgen/thash_sha2_simple.c @@ -0,0 +1,59 @@ +#include +#include + +#include "thash.h" +#include "address.h" +#include "params.h" +#include "utils.h" +#include "sha2.h" + +#if SPX_SHA512 +static void thash_512(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]); +#endif + +/** + * Takes an array of inblocks concatenated arrays of SPX_N bytes. + */ +void thash(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]) +{ +#if SPX_SHA512 + if (inblocks > 1) { + thash_512(out, in, inblocks, ctx, addr); + return; + } +#endif + + unsigned char outbuf[SPX_SHA256_OUTPUT_BYTES]; + uint8_t sha2_state[40]; + SPX_VLA(uint8_t, buf, SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + + /* Retrieve precomputed state containing pub_seed */ + memcpy(sha2_state, ctx->state_seeded, 40 * sizeof(uint8_t)); + + memcpy(buf, addr, SPX_SHA256_ADDR_BYTES); + memcpy(buf + SPX_SHA256_ADDR_BYTES, in, inblocks * SPX_N); + + sha256_inc_finalize(outbuf, sha2_state, buf, SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + memcpy(out, outbuf, SPX_N); +} + +#if SPX_SHA512 +static void thash_512(unsigned char *out, const unsigned char *in, unsigned int inblocks, + const spx_ctx *ctx, uint32_t addr[8]) +{ + unsigned char outbuf[SPX_SHA512_OUTPUT_BYTES]; + uint8_t sha2_state[72]; + SPX_VLA(uint8_t, buf, SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + + /* Retrieve precomputed state containing pub_seed */ + memcpy(sha2_state, ctx->state_seeded_512, 72 * sizeof(uint8_t)); + + memcpy(buf, addr, SPX_SHA256_ADDR_BYTES); + memcpy(buf + SPX_SHA256_ADDR_BYTES, in, inblocks * SPX_N); + + sha512_inc_finalize(outbuf, sha2_state, buf, SPX_SHA256_ADDR_BYTES + inblocks*SPX_N); + memcpy(out, outbuf, SPX_N); +} +#endif diff --git a/Blastproof/initfsgen/utils.c b/Blastproof/initfsgen/utils.c new file mode 100644 index 0000000..63d52ee --- /dev/null +++ 
b/Blastproof/initfsgen/utils.c @@ -0,0 +1,154 @@ +#include + +#include "utils.h" +#include "params.h" +#include "hash.h" +#include "thash.h" +#include "address.h" + +/** + * Converts the value of 'in' to 'outlen' bytes in big-endian byte order. + */ +void ull_to_bytes(unsigned char *out, unsigned int outlen, + unsigned long long in) +{ + int i; + + /* Iterate over out in decreasing order, for big-endianness. */ + for (i = (signed int)outlen - 1; i >= 0; i--) { + out[i] = in & 0xff; + in = in >> 8; + } +} + +void u32_to_bytes(unsigned char *out, uint32_t in) +{ + out[0] = (unsigned char)(in >> 24); + out[1] = (unsigned char)(in >> 16); + out[2] = (unsigned char)(in >> 8); + out[3] = (unsigned char)in; +} + +/** + * Converts the inlen bytes in 'in' from big-endian byte order to an integer. + */ +unsigned long long bytes_to_ull(const unsigned char *in, unsigned int inlen) +{ + unsigned long long retval = 0; + unsigned int i; + + for (i = 0; i < inlen; i++) { + retval |= ((unsigned long long)in[i]) << (8*(inlen - 1 - i)); + } + return retval; +} + +/** + * Computes a root node given a leaf and an auth path. + * Expects address to be complete other than the tree_height and tree_index. + */ +void compute_root(unsigned char *root, const unsigned char *leaf, + uint32_t leaf_idx, uint32_t idx_offset, + const unsigned char *auth_path, uint32_t tree_height, + const spx_ctx *ctx, uint32_t addr[8]) +{ + uint32_t i; + unsigned char buffer[2 * SPX_N]; + + /* If leaf_idx is odd (last bit = 1), current path element is a right child + and auth_path has to go left. Otherwise it is the other way around. */ + if (leaf_idx & 1) { + memcpy(buffer + SPX_N, leaf, SPX_N); + memcpy(buffer, auth_path, SPX_N); + } + else { + memcpy(buffer, leaf, SPX_N); + memcpy(buffer + SPX_N, auth_path, SPX_N); + } + auth_path += SPX_N; + + for (i = 0; i < tree_height - 1; i++) { + leaf_idx >>= 1; + idx_offset >>= 1; + /* Set the address of the node we're creating. 
*/ + set_tree_height(addr, i + 1); + set_tree_index(addr, leaf_idx + idx_offset); + + /* Pick the right or left neighbor, depending on parity of the node. */ + if (leaf_idx & 1) { + thash(buffer + SPX_N, buffer, 2, ctx, addr); + memcpy(buffer, auth_path, SPX_N); + } + else { + thash(buffer, buffer, 2, ctx, addr); + memcpy(buffer + SPX_N, auth_path, SPX_N); + } + auth_path += SPX_N; + } + + /* The last iteration is exceptional; we do not copy an auth_path node. */ + leaf_idx >>= 1; + idx_offset >>= 1; + set_tree_height(addr, tree_height); + set_tree_index(addr, leaf_idx + idx_offset); + thash(root, buffer, 2, ctx, addr); +} + +/** + * For a given leaf index, computes the authentication path and the resulting + * root node using Merkle's TreeHash algorithm. + * Expects the layer and tree parts of the tree_addr to be set, as well as the + * tree type (i.e. SPX_ADDR_TYPE_HASHTREE or SPX_ADDR_TYPE_FORSTREE). + * Applies the offset idx_offset to indices before building addresses, so that + * it is possible to continue counting indices across trees. + */ +void treehash(unsigned char *root, unsigned char *auth_path, const spx_ctx* ctx, + uint32_t leaf_idx, uint32_t idx_offset, uint32_t tree_height, + void (*gen_leaf)( + unsigned char* /* leaf */, + const spx_ctx* /* ctx */, + uint32_t /* addr_idx */, const uint32_t[8] /* tree_addr */), + uint32_t tree_addr[8]) +{ + SPX_VLA(uint8_t, stack, (tree_height+1)*SPX_N); + SPX_VLA(unsigned int, heights, tree_height+1); + unsigned int offset = 0; + uint32_t idx; + uint32_t tree_idx; + + for (idx = 0; idx < (uint32_t)(1 << tree_height); idx++) { + /* Add the next leaf node to the stack. */ + gen_leaf(stack + offset*SPX_N, ctx, idx + idx_offset, tree_addr); + offset++; + heights[offset - 1] = 0; + + /* If this is a node we need for the auth path.. */ + if ((leaf_idx ^ 0x1) == idx) { + memcpy(auth_path, stack + (offset - 1)*SPX_N, SPX_N); + } + + /* While the top-most nodes are of equal height.. 
*/ + while (offset >= 2 && heights[offset - 1] == heights[offset - 2]) { + /* Compute index of the new node, in the next layer. */ + tree_idx = (idx >> (heights[offset - 1] + 1)); + + /* Set the address of the node we're creating. */ + set_tree_height(tree_addr, heights[offset - 1] + 1); + set_tree_index(tree_addr, + tree_idx + (idx_offset >> (heights[offset-1] + 1))); + /* Hash the top-most nodes from the stack together. */ + thash(stack + (offset - 2)*SPX_N, + stack + (offset - 2)*SPX_N, 2, ctx, tree_addr); + offset--; + /* Note that the top-most node is now one layer higher. */ + heights[offset - 1]++; + + /* If this is a node we need for the auth path.. */ + if (((leaf_idx >> heights[offset - 1]) ^ 0x1) == tree_idx) { + memcpy(auth_path + heights[offset - 1]*SPX_N, + stack + (offset - 1)*SPX_N, SPX_N); + } + } + } + memcpy(root, stack, SPX_N); +} diff --git a/Blastproof/initfsgen/utils.h b/Blastproof/initfsgen/utils.h new file mode 100644 index 0000000..b13502c --- /dev/null +++ b/Blastproof/initfsgen/utils.h @@ -0,0 +1,64 @@ +#ifndef SPX_UTILS_H +#define SPX_UTILS_H + +#include +#include "params.h" +#include "context.h" + + +/* To support MSVC use alloca() instead of VLAs. See #20. */ +#ifdef _MSC_VER +/* MSVC defines _alloca in malloc.h */ +# include +/* Note: _malloca(), which is recommended over deprecated _alloca, + requires that you call _freea(). So we stick with _alloca */ +# define SPX_VLA(__t,__x,__s) __t *__x = (__t*)_alloca((__s)*sizeof(__t)) +#else +# define SPX_VLA(__t,__x,__s) __t __x[__s] +#endif + +/** + * Converts the value of 'in' to 'outlen' bytes in big-endian byte order. + */ +#define ull_to_bytes SPX_NAMESPACE(ull_to_bytes) +void ull_to_bytes(unsigned char *out, unsigned int outlen, + unsigned long long in); +#define u32_to_bytes SPX_NAMESPACE(u32_to_bytes) +void u32_to_bytes(unsigned char *out, uint32_t in); + +/** + * Converts the inlen bytes in 'in' from big-endian byte order to an integer. 
+ */ +#define bytes_to_ull SPX_NAMESPACE(bytes_to_ull) +unsigned long long bytes_to_ull(const unsigned char *in, unsigned int inlen); + +/** + * Computes a root node given a leaf and an auth path. + * Expects address to be complete other than the tree_height and tree_index. + */ +#define compute_root SPX_NAMESPACE(compute_root) +void compute_root(unsigned char *root, const unsigned char *leaf, + uint32_t leaf_idx, uint32_t idx_offset, + const unsigned char *auth_path, uint32_t tree_height, + const spx_ctx *ctx, uint32_t addr[8]); + +/** + * For a given leaf index, computes the authentication path and the resulting + * root node using Merkle's TreeHash algorithm. + * Expects the layer and tree parts of the tree_addr to be set, as well as the + * tree type (i.e. SPX_ADDR_TYPE_HASHTREE or SPX_ADDR_TYPE_FORSTREE). + * Applies the offset idx_offset to indices before building addresses, so that + * it is possible to continue counting indices across trees. + */ +#define treehash SPX_NAMESPACE(treehash) +void treehash(unsigned char *root, unsigned char *auth_path, + const spx_ctx* ctx, + uint32_t leaf_idx, uint32_t idx_offset, uint32_t tree_height, + void (*gen_leaf)( + unsigned char* /* leaf */, + const spx_ctx* ctx /* ctx */, + uint32_t /* addr_idx */, const uint32_t[8] /* tree_addr */), + uint32_t tree_addr[8]); + + +#endif diff --git a/Blastproof/initfsgen/utilsx1.c b/Blastproof/initfsgen/utilsx1.c new file mode 100644 index 0000000..f6a6700 --- /dev/null +++ b/Blastproof/initfsgen/utilsx1.c @@ -0,0 +1,100 @@ +#include + +#include "utils.h" +#include "utilsx1.h" +#include "params.h" +#include "thash.h" +#include "address.h" + +/* + * Generate the entire Merkle tree, computing the authentication path for + * leaf_idx, and the resulting root node using Merkle's TreeHash algorithm. + * Expects the layer and tree parts of the tree_addr to be set, as well as the + * tree type (i.e. 
SPX_ADDR_TYPE_HASHTREE or SPX_ADDR_TYPE_FORSTREE) + * + * This expects tree_addr to be initialized to the addr structures for the + * Merkle tree nodes + * + * Applies the offset idx_offset to indices before building addresses, so that + * it is possible to continue counting indices across trees. + * + * This works by using the standard Merkle tree building algorithm, + */ +void treehashx1(unsigned char *root, unsigned char *auth_path, + const spx_ctx* ctx, + uint32_t leaf_idx, uint32_t idx_offset, + uint32_t tree_height, + void (*gen_leaf)( + unsigned char* /* Where to write the leaves */, + const spx_ctx* /* ctx */, + uint32_t idx, void *info), + uint32_t tree_addr[8], + void *info) +{ + /* This is where we keep the intermediate nodes */ + SPX_VLA(uint8_t, stack, tree_height*SPX_N); + + uint32_t idx; + uint32_t max_idx = (uint32_t)((1 << tree_height) - 1); + for (idx = 0;; idx++) { + unsigned char current[2*SPX_N]; /* Current logical node is at */ + /* index[SPX_N]. We do this to minimize the number of copies */ + /* needed during a thash */ + gen_leaf( ¤t[SPX_N], ctx, idx + idx_offset, + info ); + + /* Now combine the freshly generated right node with previously */ + /* generated left ones */ + uint32_t internal_idx_offset = idx_offset; + uint32_t internal_idx = idx; + uint32_t internal_leaf = leaf_idx; + uint32_t h; /* The height we are in the Merkle tree */ + for (h=0;; h++, internal_idx >>= 1, internal_leaf >>= 1) { + + /* Check if we hit the top of the tree */ + if (h == tree_height) { + /* We hit the root; return it */ + memcpy( root, ¤t[SPX_N], SPX_N ); + return; + } + + /* + * Check if the node we have is a part of the + * authentication path; if it is, write it out + */ + if ((internal_idx ^ internal_leaf) == 0x01) { + memcpy( &auth_path[ h * SPX_N ], + ¤t[SPX_N], + SPX_N ); + } + + /* + * Check if we're at a left child; if so, stop going up the stack + * Exception: if we've reached the end of the tree, keep on going + * (so we combine the last 4 nodes 
into the one root node in two + * more iterations) + */ + if ((internal_idx & 1) == 0 && idx < max_idx) { + break; + } + + /* Ok, we're at a right node */ + /* Now combine the left and right logical nodes together */ + + /* Set the address of the node we're creating. */ + internal_idx_offset >>= 1; + set_tree_height(tree_addr, h + 1); + set_tree_index(tree_addr, internal_idx/2 + internal_idx_offset ); + + unsigned char *left = &stack[h * SPX_N]; + memcpy( ¤t[0], left, SPX_N ); + thash( ¤t[1 * SPX_N], + ¤t[0 * SPX_N], + 2, ctx, tree_addr); + } + + /* We've hit a left child; save the current for when we get the */ + /* corresponding right right */ + memcpy( &stack[h * SPX_N], ¤t[SPX_N], SPX_N); + } +} diff --git a/Blastproof/initfsgen/utilsx1.h b/Blastproof/initfsgen/utilsx1.h new file mode 100644 index 0000000..a7fcf15 --- /dev/null +++ b/Blastproof/initfsgen/utilsx1.h @@ -0,0 +1,26 @@ +#ifndef SPX_UTILSX4_H +#define SPX_UTILSX4_H + +#include +#include "params.h" +#include "context.h" + +/** + * For a given leaf index, computes the authentication path and the resulting + * root node using Merkle's TreeHash algorithm. + * Expects the layer and tree parts of the tree_addr to be set, as well as the + * tree type (i.e. SPX_ADDR_TYPE_HASHTREE or SPX_ADDR_TYPE_FORSTREE). + * Applies the offset idx_offset to indices before building addresses, so that + * it is possible to continue counting indices across trees. 
+ */ +#define treehashx1 SPX_NAMESPACE(treehashx1) +void treehashx1(unsigned char *root, unsigned char *auth_path, + const spx_ctx* ctx, + uint32_t leaf_idx, uint32_t idx_offset, uint32_t tree_height, + void (*gen_leaf)( + unsigned char* /* Where to write the leaf */, + const spx_ctx* /* ctx */, + uint32_t addr_idx, void *info), + uint32_t tree_addrx4[8], void *info); + +#endif diff --git a/Blastproof/initfsgen/wots.c b/Blastproof/initfsgen/wots.c new file mode 100644 index 0000000..df83278 --- /dev/null +++ b/Blastproof/initfsgen/wots.c @@ -0,0 +1,112 @@ +#include +#include + +#include "utils.h" +#include "utilsx1.h" +#include "hash.h" +#include "thash.h" +#include "wots.h" +#include "wotsx1.h" +#include "address.h" +#include "params.h" + +// TODO clarify address expectations, and make them more uniform. +// TODO i.e. do we expect types to be set already? +// TODO and do we expect modifications or copies? + +/** + * Computes the chaining function. + * out and in have to be n-byte arrays. + * + * Interprets in as start-th value of the chain. + * addr has to contain the address of the chain. + */ +static void gen_chain(unsigned char *out, const unsigned char *in, + unsigned int start, unsigned int steps, + const spx_ctx *ctx, uint32_t addr[8]) +{ + uint32_t i; + + /* Initialize out with the value at position 'start'. */ + memcpy(out, in, SPX_N); + + /* Iterate 'steps' calls to the hash function. */ + for (i = start; i < (start+steps) && i < SPX_WOTS_W; i++) { + set_hash_addr(addr, i); + thash(out, out, 1, ctx, addr); + } +} + +/** + * base_w algorithm as described in draft. + * Interprets an array of bytes as integers in base w. + * This only works when log_w is a divisor of 8. 
+ */ +static void base_w(unsigned int *output, const int out_len, + const unsigned char *input) +{ + int in = 0; + int out = 0; + unsigned char total; + int bits = 0; + int consumed; + + for (consumed = 0; consumed < out_len; consumed++) { + if (bits == 0) { + total = input[in]; + in++; + bits += 8; + } + bits -= SPX_WOTS_LOGW; + output[out] = (total >> bits) & (SPX_WOTS_W - 1); + out++; + } +} + +/* Computes the WOTS+ checksum over a message (in base_w). */ +static void wots_checksum(unsigned int *csum_base_w, + const unsigned int *msg_base_w) +{ + unsigned int csum = 0; + unsigned char csum_bytes[(SPX_WOTS_LEN2 * SPX_WOTS_LOGW + 7) / 8]; + unsigned int i; + + /* Compute checksum. */ + for (i = 0; i < SPX_WOTS_LEN1; i++) { + csum += SPX_WOTS_W - 1 - msg_base_w[i]; + } + + /* Convert checksum to base_w. */ + /* Make sure expected empty zero bits are the least significant bits. */ + csum = csum << ((8 - ((SPX_WOTS_LEN2 * SPX_WOTS_LOGW) % 8)) % 8); + ull_to_bytes(csum_bytes, sizeof(csum_bytes), csum); + base_w(csum_base_w, SPX_WOTS_LEN2, csum_bytes); +} + +/* Takes a message and derives the matching chain lengths. */ +void chain_lengths(unsigned int *lengths, const unsigned char *msg) +{ + base_w(lengths, SPX_WOTS_LEN1, msg); + wots_checksum(lengths + SPX_WOTS_LEN1, lengths); +} + +/** + * Takes a WOTS signature and an n-byte message, computes a WOTS public key. + * + * Writes the computed public key to 'pk'. 
+ */ +void wots_pk_from_sig(unsigned char *pk, + const unsigned char *sig, const unsigned char *msg, + const spx_ctx *ctx, uint32_t addr[8]) +{ + unsigned int lengths[SPX_WOTS_LEN]; + uint32_t i; + + chain_lengths(lengths, msg); + + for (i = 0; i < SPX_WOTS_LEN; i++) { + set_chain_addr(addr, i); + gen_chain(pk + i*SPX_N, sig + i*SPX_N, + lengths[i], SPX_WOTS_W - 1 - lengths[i], ctx, addr); + } +} diff --git a/Blastproof/initfsgen/wots.h b/Blastproof/initfsgen/wots.h new file mode 100644 index 0000000..7e77056 --- /dev/null +++ b/Blastproof/initfsgen/wots.h @@ -0,0 +1,25 @@ +#ifndef SPX_WOTS_H +#define SPX_WOTS_H + +#include + +#include "params.h" +#include "context.h" + +/** + * Takes a WOTS signature and an n-byte message, computes a WOTS public key. + * + * Writes the computed public key to 'pk'. + */ +#define wots_pk_from_sig SPX_NAMESPACE(wots_pk_from_sig) +void wots_pk_from_sig(unsigned char *pk, + const unsigned char *sig, const unsigned char *msg, + const spx_ctx *ctx, uint32_t addr[8]); + +/* + * Compute the chain lengths needed for a given message hash + */ +#define chain_lengths SPX_NAMESPACE(chain_lengths) +void chain_lengths(unsigned int *lengths, const unsigned char *msg); + +#endif diff --git a/Blastproof/initfsgen/wotsx1.c b/Blastproof/initfsgen/wotsx1.c new file mode 100644 index 0000000..dfb3780 --- /dev/null +++ b/Blastproof/initfsgen/wotsx1.c @@ -0,0 +1,73 @@ +#include +#include + +#include "utils.h" +#include "hash.h" +#include "thash.h" +#include "wots.h" +#include "wotsx1.h" +#include "address.h" +#include "params.h" + +/* + * This generates a WOTS public key + * It also generates the WOTS signature if leaf_info indicates + * that we're signing with this WOTS key + */ +void wots_gen_leafx1(unsigned char *dest, + const spx_ctx *ctx, + uint32_t leaf_idx, void *v_info) { + struct leaf_info_x1 *info = v_info; + uint32_t *leaf_addr = info->leaf_addr; + uint32_t *pk_addr = info->pk_addr; + unsigned int i, k; + unsigned char pk_buffer[ SPX_WOTS_BYTES 
]; + unsigned char *buffer; + uint32_t wots_k_mask; + + if (leaf_idx == info->wots_sign_leaf) { + /* We're traversing the leaf that's signing; generate the WOTS */ + /* signature */ + wots_k_mask = 0; + } else { + /* Nope, we're just generating pk's; turn off the signature logic */ + wots_k_mask = (uint32_t)~0; + } + + set_keypair_addr( leaf_addr, leaf_idx ); + set_keypair_addr( pk_addr, leaf_idx ); + + for (i = 0, buffer = pk_buffer; i < SPX_WOTS_LEN; i++, buffer += SPX_N) { + uint32_t wots_k = info->wots_steps[i] | wots_k_mask; /* Set wots_k to */ + /* the step if we're generating a signature, ~0 if we're not */ + + /* Start with the secret seed */ + set_chain_addr(leaf_addr, i); + set_hash_addr(leaf_addr, 0); + set_type(leaf_addr, SPX_ADDR_TYPE_WOTSPRF); + + prf_addr(buffer, ctx, leaf_addr); + + set_type(leaf_addr, SPX_ADDR_TYPE_WOTS); + + /* Iterate down the WOTS chain */ + for (k=0;; k++) { + /* Check if this is the value that needs to be saved as a */ + /* part of the WOTS signature */ + if (k == wots_k) { + memcpy( info->wots_sig + i * SPX_N, buffer, SPX_N ); + } + + /* Check if we hit the top of the chain */ + if (k == SPX_WOTS_W - 1) break; + + /* Iterate one step on the chain */ + set_hash_addr(leaf_addr, k); + + thash(buffer, buffer, 1, ctx, leaf_addr); + } + } + + /* Do the final thash to generate the public keys */ + thash(dest, pk_buffer, SPX_WOTS_LEN, ctx, pk_addr); +} diff --git a/Blastproof/initfsgen/wotsx1.h b/Blastproof/initfsgen/wotsx1.h new file mode 100644 index 0000000..1257f81 --- /dev/null +++ b/Blastproof/initfsgen/wotsx1.h @@ -0,0 +1,36 @@ +#if !defined( WOTSX1_H_ ) +#define WOTSX1_H_ + +#include + +/* + * This is here to provide an interface to the internal wots_gen_leafx1 + * routine. 
While this routine is not referenced in the package outside of + * wots.c, it is called from the stand-alone benchmark code to characterize + * the performance + */ +struct leaf_info_x1 { + unsigned char *wots_sig; + uint32_t wots_sign_leaf; /* The index of the WOTS we're using to sign */ + uint32_t *wots_steps; + uint32_t leaf_addr[8]; + uint32_t pk_addr[8]; +}; + +/* Macro to set the leaf_info to something 'benign', that is, it would */ +/* run with the same time as it does during the real signing process */ +/* Used only by the benchmark code */ +#define INITIALIZE_LEAF_INFO_X1(info, addr, step_buffer) { \ + info.wots_sig = 0; \ + info.wots_sign_leaf = ~0u; \ + info.wots_steps = step_buffer; \ + memcpy( &info.leaf_addr[0], addr, 32 ); \ + memcpy( &info.pk_addr[0], addr, 32 ); \ +} + +#define wots_gen_leafx1 SPX_NAMESPACE(wots_gen_leafx1) +void wots_gen_leafx1(unsigned char *dest, + const spx_ctx *ctx, + uint32_t leaf_idx, void *v_info); + +#endif /* WOTSX1_H_ */ diff --git a/Blastproof/keygen/address.c b/Blastproof/keygen/address.c new file mode 100644 index 0000000..b136af8 --- /dev/null +++ b/Blastproof/keygen/address.c @@ -0,0 +1,104 @@ +#include +#include + +#include "address.h" +#include "params.h" +#include "utils.h" + +/* + * Specify which level of Merkle tree (the "layer") we're working on + */ +void set_layer_addr(uint32_t addr[8], uint32_t layer) +{ + ((unsigned char *)addr)[SPX_OFFSET_LAYER] = (unsigned char)layer; +} + +/* + * Specify which Merkle tree within the level (the "tree address") we're working on + */ +void set_tree_addr(uint32_t addr[8], uint64_t tree) +{ +#if (SPX_TREE_HEIGHT * (SPX_D - 1)) > 64 + #error Subtree addressing is currently limited to at most 2^64 trees +#endif + ull_to_bytes(&((unsigned char *)addr)[SPX_OFFSET_TREE], 8, tree ); +} + +/* + * Specify the reason we'll use this address structure for, that is, what + * hash will we compute with it. 
This is used so that unrelated types of + * hashes don't accidentally get the same address structure. The type will be + * one of the SPX_ADDR_TYPE constants + */ +void set_type(uint32_t addr[8], uint32_t type) +{ + ((unsigned char *)addr)[SPX_OFFSET_TYPE] = (unsigned char)type; +} + +/* + * Copy the layer and tree fields of the address structure. This is used + * when we're doing multiple types of hashes within the same Merkle tree + */ +void copy_subtree_addr(uint32_t out[8], const uint32_t in[8]) +{ + memcpy( out, in, SPX_OFFSET_TREE+8 ); +} + +/* These functions are used for OTS addresses. */ + +/* + * Specify which Merkle leaf we're working on; that is, which OTS keypair + * we're talking about. + */ +void set_keypair_addr(uint32_t addr[8], uint32_t keypair) +{ + u32_to_bytes(&((unsigned char *)addr)[SPX_OFFSET_KP_ADDR], keypair); +} + +/* + * Copy the layer, tree and keypair fields of the address structure. This is + * used when we're doing multiple things within the same OTS keypair + */ +void copy_keypair_addr(uint32_t out[8], const uint32_t in[8]) +{ + memcpy( out, in, SPX_OFFSET_TREE+8 ); + memcpy( (unsigned char *)out + SPX_OFFSET_KP_ADDR, (unsigned char *)in + SPX_OFFSET_KP_ADDR, 4); +} + +/* + * Specify which Merkle chain within the OTS we're working with + * (the chain address) + */ +void set_chain_addr(uint32_t addr[8], uint32_t chain) +{ + ((unsigned char *)addr)[SPX_OFFSET_CHAIN_ADDR] = (unsigned char)chain; +} + +/* + * Specify where in the Merkle chain we are +* (the hash address) + */ +void set_hash_addr(uint32_t addr[8], uint32_t hash) +{ + ((unsigned char *)addr)[SPX_OFFSET_HASH_ADDR] = (unsigned char)hash; +} + +/* These functions are used for all hash tree addresses (including FORS). 
*/ + +/* + * Specify the height of the node in the Merkle/FORS tree we are in + * (the tree height) + */ +void set_tree_height(uint32_t addr[8], uint32_t tree_height) +{ + ((unsigned char *)addr)[SPX_OFFSET_TREE_HGT] = (unsigned char)tree_height; +} + +/* + * Specify the distance from the left edge of the node in the Merkle/FORS tree + * (the tree index) + */ +void set_tree_index(uint32_t addr[8], uint32_t tree_index) +{ + u32_to_bytes(&((unsigned char *)addr)[SPX_OFFSET_TREE_INDEX], tree_index ); +} diff --git a/Blastproof/keygen/address.h b/Blastproof/keygen/address.h new file mode 100644 index 0000000..49f8d66 --- /dev/null +++ b/Blastproof/keygen/address.h @@ -0,0 +1,51 @@ +#ifndef SPX_ADDRESS_H +#define SPX_ADDRESS_H + +#include +#include "params.h" + +/* The hash types that are passed to set_type */ +#define SPX_ADDR_TYPE_WOTS 0 +#define SPX_ADDR_TYPE_WOTSPK 1 +#define SPX_ADDR_TYPE_HASHTREE 2 +#define SPX_ADDR_TYPE_FORSTREE 3 +#define SPX_ADDR_TYPE_FORSPK 4 +#define SPX_ADDR_TYPE_WOTSPRF 5 +#define SPX_ADDR_TYPE_FORSPRF 6 + +#define set_layer_addr SPX_NAMESPACE(set_layer_addr) +void set_layer_addr(uint32_t addr[8], uint32_t layer); + +#define set_tree_addr SPX_NAMESPACE(set_tree_addr) +void set_tree_addr(uint32_t addr[8], uint64_t tree); + +#define set_type SPX_NAMESPACE(set_type) +void set_type(uint32_t addr[8], uint32_t type); + +/* Copies the layer and tree part of one address into the other */ +#define copy_subtree_addr SPX_NAMESPACE(copy_subtree_addr) +void copy_subtree_addr(uint32_t out[8], const uint32_t in[8]); + +/* These functions are used for WOTS and FORS addresses. 
*/ + +#define set_keypair_addr SPX_NAMESPACE(set_keypair_addr) +void set_keypair_addr(uint32_t addr[8], uint32_t keypair); + +#define set_chain_addr SPX_NAMESPACE(set_chain_addr) +void set_chain_addr(uint32_t addr[8], uint32_t chain); + +#define set_hash_addr SPX_NAMESPACE(set_hash_addr) +void set_hash_addr(uint32_t addr[8], uint32_t hash); + +#define copy_keypair_addr SPX_NAMESPACE(copy_keypair_addr) +void copy_keypair_addr(uint32_t out[8], const uint32_t in[8]); + +/* These functions are used for all hash tree addresses (including FORS). */ + +#define set_tree_height SPX_NAMESPACE(set_tree_height) +void set_tree_height(uint32_t addr[8], uint32_t tree_height); + +#define set_tree_index SPX_NAMESPACE(set_tree_index) +void set_tree_index(uint32_t addr[8], uint32_t tree_index); + +#endif diff --git a/Blastproof/keygen/address.o b/Blastproof/keygen/address.o new file mode 100644 index 0000000000000000000000000000000000000000..5f348fe71f0d76369951f675b41a52824f1ddaf8 GIT binary patch literal 2784 zcmbuBO=uHA6o6-AwKiIfqM)=$PX4<#5ie3SrNl0xQq+nEC2o?@)aH-u2AZN+2&hX? 
z>fJ+cUc{Rh5eXHvf~YqUJa`d2c&L~|4uZZnJ1@JLZZ7&@{=WCSJ1^VbPM$s)(lkb* zvE$4;5oK(#-`gj|Ho*qjMmA+Q-wfwg+o`r1$(FNZG?MLmk%cR-grfcH;%BZHwPM1tsarih-l9TM2Po>^QJ3`ObQjbIVZzd z*=xi36lCVs793I3T5pDp##Gy=FNGV-2?f@=rK{Gi$JpLm)$O&a?5%3qY+`);Sak2i z*{OZe*ik*M$D;9(_|aG_7H56%E}}5j!T)v}*@!l@v48yn z7!L!(fwB{)u}zUXq4B|W7r+3Hs2fENau_Gl-|+O8k%J83MEZ-K{w8ve12~cXnx}t+ z9Apnpq)&VL7sx?Q;6(a4PyYouNU!|AU3E|WuzYE~{70oes6H8b)q*a4?B}0V3IJ<5 zx8!Lzz;>&Ti@N}}OL97kI{-EUBLYK}F#s6HkP7ikusiZdhaA4q%g z?69AQ5oG@mIOcr@D~h-)SRcbc-a-gM^*o0?^3DEt5A2=f`xX9G@}%Tc4>Pmb3b!mh z!_H4$N?F`a<;@zeq>y)zUE$m#)qFl>ms07P%`MNQR_5Z6DV1v}tD5$F3cQdh*E*In zyjC`+eHp4p;@$B?h*f4-TP9~t7rjalQ>a(v3r#^$$5<+!SpG6f;eU?x-^%>KxTERBcu-X-O&C#yH^QLa|S)1uu z(>9r&wk)P+N`(S10xOnmuEV$akY$^h8F{{5tm@@Tsmv>Ojp-r+&!w(a%mSBB@_$Pd z-0@M?DPa=woR`l@YULMZQL!X?db1k_r_c z1dcKP)jtNl7!$vbexdxseh7QYPwO!s^P4V{fgc9go&wen1N+V|W}}Usdop6eZ- +#include + +#include "params.h" + +#define CRYPTO_ALGNAME "SPHINCS+" + +#define CRYPTO_SECRETKEYBYTES SPX_SK_BYTES +#define CRYPTO_PUBLICKEYBYTES SPX_PK_BYTES +#define CRYPTO_BYTES SPX_BYTES +#define CRYPTO_SEEDBYTES 3*SPX_N + +/* + * Returns the length of a secret key, in bytes + */ +unsigned long long crypto_sign_secretkeybytes(void); + +/* + * Returns the length of a public key, in bytes + */ +unsigned long long crypto_sign_publickeybytes(void); + +/* + * Returns the length of a signature, in bytes + */ +unsigned long long crypto_sign_bytes(void); + +/* + * Returns the length of the seed required to generate a key pair, in bytes + */ +unsigned long long crypto_sign_seedbytes(void); + +/* + * Generates a SPHINCS+ key pair given a seed. + * Format sk: [SK_SEED || SK_PRF || PUB_SEED || root] + * Format pk: [root || PUB_SEED] + */ +int crypto_sign_seed_keypair(unsigned char *pk, unsigned char *sk, + const unsigned char *seed); + +/* + * Generates a SPHINCS+ key pair. 
+ * Format sk: [SK_SEED || SK_PRF || PUB_SEED || root] + * Format pk: [root || PUB_SEED] + */ +int crypto_sign_keypair(unsigned char *pk, unsigned char *sk); + +/** + * Returns an array containing a detached signature. + */ +int crypto_sign_signature(uint8_t *sig, size_t *siglen, + const uint8_t *m, size_t mlen, const uint8_t *sk); + +/** + * Verifies a detached signature and message under a given public key. + */ +int crypto_sign_verify(const uint8_t *sig, size_t siglen, + const uint8_t *m, size_t mlen, const uint8_t *pk); + +/** + * Returns an array containing the signature followed by the message. + */ +int crypto_sign(unsigned char *sm, unsigned long long *smlen, + const unsigned char *m, unsigned long long mlen, + const unsigned char *sk); + +/** + * Verifies a given signature-message pair under a given public key. + */ +int crypto_sign_open(unsigned char *m, unsigned long long *mlen, + const unsigned char *sm, unsigned long long smlen, + const unsigned char *pk); + +#endif diff --git a/Blastproof/keygen/argon2.h b/Blastproof/keygen/argon2.h new file mode 100644 index 0000000..3980bb3 --- /dev/null +++ b/Blastproof/keygen/argon2.h @@ -0,0 +1,437 @@ +/* + * Argon2 reference source code package - reference C implementations + * + * Copyright 2015 + * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves + * + * You may use this work under the terms of a Creative Commons CC0 1.0 + * License/Waiver or the Apache Public License 2.0, at your option. The terms of + * these licenses can be found at: + * + * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0 + * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 + * + * You should have received a copy of both of these licenses along with this + * software. If not, they may be obtained at the above URLs. 
+ */ + +#ifndef ARGON2_H +#define ARGON2_H + +#include +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Symbols visibility control */ +#ifdef A2_VISCTL +#define ARGON2_PUBLIC __attribute__((visibility("default"))) +#define ARGON2_LOCAL __attribute__ ((visibility ("hidden"))) +#elif defined(_MSC_VER) +#define ARGON2_PUBLIC __declspec(dllexport) +#define ARGON2_LOCAL +#else +#define ARGON2_PUBLIC +#define ARGON2_LOCAL +#endif + +/* + * Argon2 input parameter restrictions + */ + +/* Minimum and maximum number of lanes (degree of parallelism) */ +#define ARGON2_MIN_LANES UINT32_C(1) +#define ARGON2_MAX_LANES UINT32_C(0xFFFFFF) + +/* Minimum and maximum number of threads */ +#define ARGON2_MIN_THREADS UINT32_C(1) +#define ARGON2_MAX_THREADS UINT32_C(0xFFFFFF) + +/* Number of synchronization points between lanes per pass */ +#define ARGON2_SYNC_POINTS UINT32_C(4) + +/* Minimum and maximum digest size in bytes */ +#define ARGON2_MIN_OUTLEN UINT32_C(4) +#define ARGON2_MAX_OUTLEN UINT32_C(0xFFFFFFFF) + +/* Minimum and maximum number of memory blocks (each of BLOCK_SIZE bytes) */ +#define ARGON2_MIN_MEMORY (2 * ARGON2_SYNC_POINTS) /* 2 blocks per slice */ + +#define ARGON2_MIN(a, b) ((a) < (b) ? 
(a) : (b)) +/* Max memory size is addressing-space/2, topping at 2^32 blocks (4 TB) */ +#define ARGON2_MAX_MEMORY_BITS \ + ARGON2_MIN(UINT32_C(32), (sizeof(void *) * CHAR_BIT - 10 - 1)) +#define ARGON2_MAX_MEMORY \ + ARGON2_MIN(UINT32_C(0xFFFFFFFF), UINT64_C(1) << ARGON2_MAX_MEMORY_BITS) + +/* Minimum and maximum number of passes */ +#define ARGON2_MIN_TIME UINT32_C(1) +#define ARGON2_MAX_TIME UINT32_C(0xFFFFFFFF) + +/* Minimum and maximum password length in bytes */ +#define ARGON2_MIN_PWD_LENGTH UINT32_C(0) +#define ARGON2_MAX_PWD_LENGTH UINT32_C(0xFFFFFFFF) + +/* Minimum and maximum associated data length in bytes */ +#define ARGON2_MIN_AD_LENGTH UINT32_C(0) +#define ARGON2_MAX_AD_LENGTH UINT32_C(0xFFFFFFFF) + +/* Minimum and maximum salt length in bytes */ +#define ARGON2_MIN_SALT_LENGTH UINT32_C(8) +#define ARGON2_MAX_SALT_LENGTH UINT32_C(0xFFFFFFFF) + +/* Minimum and maximum key length in bytes */ +#define ARGON2_MIN_SECRET UINT32_C(0) +#define ARGON2_MAX_SECRET UINT32_C(0xFFFFFFFF) + +/* Flags to determine which fields are securely wiped (default = no wipe). */ +#define ARGON2_DEFAULT_FLAGS UINT32_C(0) +#define ARGON2_FLAG_CLEAR_PASSWORD (UINT32_C(1) << 0) +#define ARGON2_FLAG_CLEAR_SECRET (UINT32_C(1) << 1) + +/* Global flag to determine if we are wiping internal memory buffers. This flag + * is defined in core.c and defaults to 1 (wipe internal memory). 
*/ +extern int FLAG_clear_internal_memory; + +/* Error codes */ +typedef enum Argon2_ErrorCodes { + ARGON2_OK = 0, + + ARGON2_OUTPUT_PTR_NULL = -1, + + ARGON2_OUTPUT_TOO_SHORT = -2, + ARGON2_OUTPUT_TOO_LONG = -3, + + ARGON2_PWD_TOO_SHORT = -4, + ARGON2_PWD_TOO_LONG = -5, + + ARGON2_SALT_TOO_SHORT = -6, + ARGON2_SALT_TOO_LONG = -7, + + ARGON2_AD_TOO_SHORT = -8, + ARGON2_AD_TOO_LONG = -9, + + ARGON2_SECRET_TOO_SHORT = -10, + ARGON2_SECRET_TOO_LONG = -11, + + ARGON2_TIME_TOO_SMALL = -12, + ARGON2_TIME_TOO_LARGE = -13, + + ARGON2_MEMORY_TOO_LITTLE = -14, + ARGON2_MEMORY_TOO_MUCH = -15, + + ARGON2_LANES_TOO_FEW = -16, + ARGON2_LANES_TOO_MANY = -17, + + ARGON2_PWD_PTR_MISMATCH = -18, /* NULL ptr with non-zero length */ + ARGON2_SALT_PTR_MISMATCH = -19, /* NULL ptr with non-zero length */ + ARGON2_SECRET_PTR_MISMATCH = -20, /* NULL ptr with non-zero length */ + ARGON2_AD_PTR_MISMATCH = -21, /* NULL ptr with non-zero length */ + + ARGON2_MEMORY_ALLOCATION_ERROR = -22, + + ARGON2_FREE_MEMORY_CBK_NULL = -23, + ARGON2_ALLOCATE_MEMORY_CBK_NULL = -24, + + ARGON2_INCORRECT_PARAMETER = -25, + ARGON2_INCORRECT_TYPE = -26, + + ARGON2_OUT_PTR_MISMATCH = -27, + + ARGON2_THREADS_TOO_FEW = -28, + ARGON2_THREADS_TOO_MANY = -29, + + ARGON2_MISSING_ARGS = -30, + + ARGON2_ENCODING_FAIL = -31, + + ARGON2_DECODING_FAIL = -32, + + ARGON2_THREAD_FAIL = -33, + + ARGON2_DECODING_LENGTH_FAIL = -34, + + ARGON2_VERIFY_MISMATCH = -35 +} argon2_error_codes; + +/* Memory allocator types --- for external allocation */ +typedef int (*allocate_fptr)(uint8_t **memory, size_t bytes_to_allocate); +typedef void (*deallocate_fptr)(uint8_t *memory, size_t bytes_to_allocate); + +/* Argon2 external data structures */ + +/* + ***** + * Context: structure to hold Argon2 inputs: + * output array and its length, + * password and its length, + * salt and its length, + * secret and its length, + * associated data and its length, + * number of passes, amount of used memory (in KBytes, can be rounded up a bit) + * 
number of parallel threads that will be run. + * All the parameters above affect the output hash value. + * Additionally, two function pointers can be provided to allocate and + * deallocate the memory (if NULL, memory will be allocated internally). + * Also, three flags indicate whether to erase password, secret as soon as they + * are pre-hashed (and thus not needed anymore), and the entire memory + ***** + * Simplest situation: you have output array out[8], password is stored in + * pwd[32], salt is stored in salt[16], you do not have keys nor associated + * data. You need to spend 1 GB of RAM and you run 5 passes of Argon2d with + * 4 parallel lanes. + * You want to erase the password, but you're OK with last pass not being + * erased. You want to use the default memory allocator. + * Then you initialize: + Argon2_Context(out,8,pwd,32,salt,16,NULL,0,NULL,0,5,1<<20,4,4,NULL,NULL,true,false,false,false) + */ +typedef struct Argon2_Context { + uint8_t *out; /* output array */ + uint32_t outlen; /* digest length */ + + uint8_t *pwd; /* password array */ + uint32_t pwdlen; /* password length */ + + uint8_t *salt; /* salt array */ + uint32_t saltlen; /* salt length */ + + uint8_t *secret; /* key array */ + uint32_t secretlen; /* key length */ + + uint8_t *ad; /* associated data array */ + uint32_t adlen; /* associated data length */ + + uint32_t t_cost; /* number of passes */ + uint32_t m_cost; /* amount of memory requested (KB) */ + uint32_t lanes; /* number of lanes */ + uint32_t threads; /* maximum number of threads */ + + uint32_t version; /* version number */ + + allocate_fptr allocate_cbk; /* pointer to memory allocator */ + deallocate_fptr free_cbk; /* pointer to memory deallocator */ + + uint32_t flags; /* array of bool options */ +} argon2_context; + +/* Argon2 primitive type */ +typedef enum Argon2_type { + Argon2_d = 0, + Argon2_i = 1, + Argon2_id = 2 +} argon2_type; + +/* Version of the algorithm */ +typedef enum Argon2_version { + ARGON2_VERSION_10 = 
0x10, + ARGON2_VERSION_13 = 0x13, + ARGON2_VERSION_NUMBER = ARGON2_VERSION_13 +} argon2_version; + +/* + * Function that gives the string representation of an argon2_type. + * @param type The argon2_type that we want the string for + * @param uppercase Whether the string should have the first letter uppercase + * @return NULL if invalid type, otherwise the string representation. + */ +ARGON2_PUBLIC const char *argon2_type2string(argon2_type type, int uppercase); + +/* + * Function that performs memory-hard hashing with certain degree of parallelism + * @param context Pointer to the Argon2 internal structure + * @return Error code if smth is wrong, ARGON2_OK otherwise + */ +ARGON2_PUBLIC int argon2_ctx(argon2_context *context, argon2_type type); + +/** + * Hashes a password with Argon2i, producing an encoded hash + * @param t_cost Number of iterations + * @param m_cost Sets memory usage to m_cost kibibytes + * @param parallelism Number of threads and compute lanes + * @param pwd Pointer to password + * @param pwdlen Password size in bytes + * @param salt Pointer to salt + * @param saltlen Salt size in bytes + * @param hashlen Desired length of the hash in bytes + * @param encoded Buffer where to write the encoded hash + * @param encodedlen Size of the buffer (thus max size of the encoded hash) + * @pre Different parallelism levels will give different results + * @pre Returns ARGON2_OK if successful + */ +ARGON2_PUBLIC int argon2i_hash_encoded(const uint32_t t_cost, + const uint32_t m_cost, + const uint32_t parallelism, + const void *pwd, const size_t pwdlen, + const void *salt, const size_t saltlen, + const size_t hashlen, char *encoded, + const size_t encodedlen); + +/** + * Hashes a password with Argon2i, producing a raw hash at @hash + * @param t_cost Number of iterations + * @param m_cost Sets memory usage to m_cost kibibytes + * @param parallelism Number of threads and compute lanes + * @param pwd Pointer to password + * @param pwdlen Password size in bytes + * 
@param salt Pointer to salt + * @param saltlen Salt size in bytes + * @param hash Buffer where to write the raw hash - updated by the function + * @param hashlen Desired length of the hash in bytes + * @pre Different parallelism levels will give different results + * @pre Returns ARGON2_OK if successful + */ +ARGON2_PUBLIC int argon2i_hash_raw(const uint32_t t_cost, const uint32_t m_cost, + const uint32_t parallelism, const void *pwd, + const size_t pwdlen, const void *salt, + const size_t saltlen, void *hash, + const size_t hashlen); + +ARGON2_PUBLIC int argon2d_hash_encoded(const uint32_t t_cost, + const uint32_t m_cost, + const uint32_t parallelism, + const void *pwd, const size_t pwdlen, + const void *salt, const size_t saltlen, + const size_t hashlen, char *encoded, + const size_t encodedlen); + +ARGON2_PUBLIC int argon2d_hash_raw(const uint32_t t_cost, const uint32_t m_cost, + const uint32_t parallelism, const void *pwd, + const size_t pwdlen, const void *salt, + const size_t saltlen, void *hash, + const size_t hashlen); + +ARGON2_PUBLIC int argon2id_hash_encoded(const uint32_t t_cost, + const uint32_t m_cost, + const uint32_t parallelism, + const void *pwd, const size_t pwdlen, + const void *salt, const size_t saltlen, + const size_t hashlen, char *encoded, + const size_t encodedlen); + +ARGON2_PUBLIC int argon2id_hash_raw(const uint32_t t_cost, + const uint32_t m_cost, + const uint32_t parallelism, const void *pwd, + const size_t pwdlen, const void *salt, + const size_t saltlen, void *hash, + const size_t hashlen); + +/* generic function underlying the above ones */ +ARGON2_PUBLIC int argon2_hash(const uint32_t t_cost, const uint32_t m_cost, + const uint32_t parallelism, const void *pwd, + const size_t pwdlen, const void *salt, + const size_t saltlen, void *hash, + const size_t hashlen, char *encoded, + const size_t encodedlen, argon2_type type, + const uint32_t version); + +/** + * Verifies a password against an encoded string + * Encoded string is 
restricted as in validate_inputs() + * @param encoded String encoding parameters, salt, hash + * @param pwd Pointer to password + * @pre Returns ARGON2_OK if successful + */ +ARGON2_PUBLIC int argon2i_verify(const char *encoded, const void *pwd, + const size_t pwdlen); + +ARGON2_PUBLIC int argon2d_verify(const char *encoded, const void *pwd, + const size_t pwdlen); + +ARGON2_PUBLIC int argon2id_verify(const char *encoded, const void *pwd, + const size_t pwdlen); + +/* generic function underlying the above ones */ +ARGON2_PUBLIC int argon2_verify(const char *encoded, const void *pwd, + const size_t pwdlen, argon2_type type); + +/** + * Argon2d: Version of Argon2 that picks memory blocks depending + * on the password and salt. Only for side-channel-free + * environment!! + ***** + * @param context Pointer to current Argon2 context + * @return Zero if successful, a non zero error code otherwise + */ +ARGON2_PUBLIC int argon2d_ctx(argon2_context *context); + +/** + * Argon2i: Version of Argon2 that picks memory blocks + * independent on the password and salt. Good for side-channels, + * but worse w.r.t. tradeoff attacks if only one pass is used. + ***** + * @param context Pointer to current Argon2 context + * @return Zero if successful, a non zero error code otherwise + */ +ARGON2_PUBLIC int argon2i_ctx(argon2_context *context); + +/** + * Argon2id: Version of Argon2 where the first half-pass over memory is + * password-independent, the rest are password-dependent (on the password and + * salt). OK against side channels (they reduce to 1/2-pass Argon2i), and + * better with w.r.t. tradeoff attacks (similar to Argon2d). + ***** + * @param context Pointer to current Argon2 context + * @return Zero if successful, a non zero error code otherwise + */ +ARGON2_PUBLIC int argon2id_ctx(argon2_context *context); + +/** + * Verify if a given password is correct for Argon2d hashing + * @param context Pointer to current Argon2 context + * @param hash The password hash to verify. 
The length of the hash is + * specified by the context outlen member + * @return Zero if successful, a non zero error code otherwise + */ +ARGON2_PUBLIC int argon2d_verify_ctx(argon2_context *context, const char *hash); + +/** + * Verify if a given password is correct for Argon2i hashing + * @param context Pointer to current Argon2 context + * @param hash The password hash to verify. The length of the hash is + * specified by the context outlen member + * @return Zero if successful, a non zero error code otherwise + */ +ARGON2_PUBLIC int argon2i_verify_ctx(argon2_context *context, const char *hash); + +/** + * Verify if a given password is correct for Argon2id hashing + * @param context Pointer to current Argon2 context + * @param hash The password hash to verify. The length of the hash is + * specified by the context outlen member + * @return Zero if successful, a non zero error code otherwise + */ +ARGON2_PUBLIC int argon2id_verify_ctx(argon2_context *context, + const char *hash); + +/* generic function underlying the above ones */ +ARGON2_PUBLIC int argon2_verify_ctx(argon2_context *context, const char *hash, + argon2_type type); + +/** + * Get the associated error message for given error code + * @return The error message associated with the given error code + */ +ARGON2_PUBLIC const char *argon2_error_message(int error_code); + +/** + * Returns the encoded hash length for the given input parameters + * @param t_cost Number of iterations + * @param m_cost Memory usage in kibibytes + * @param parallelism Number of threads; used to compute lanes + * @param saltlen Salt size in bytes + * @param hashlen Hash size in bytes + * @param type The argon2_type that we want the encoded length for + * @return The encoded hash length in bytes + */ +ARGON2_PUBLIC size_t argon2_encodedlen(uint32_t t_cost, uint32_t m_cost, + uint32_t parallelism, uint32_t saltlen, + uint32_t hashlen, argon2_type type); + +#if defined(__cplusplus) +} +#endif + +#endif diff --git 
a/Blastproof/keygen/argon2/Argon2.sln b/Blastproof/keygen/argon2/Argon2.sln new file mode 100644 index 0000000..b16cda0 --- /dev/null +++ b/Blastproof/keygen/argon2/Argon2.sln @@ -0,0 +1,158 @@ + +Microsoft Visual Studio Solution File, Format Version 11.00 +# Visual Studio 2010 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2OptTestCI", "vs2015\Argon2OptTestCI\Argon2OptTestCI.vcxproj", "{12956597-5E42-433A-93F3-D4EFF50AA207}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2RefTestCI", "vs2015\Argon2RefTestCI\Argon2RefTestCI.vcxproj", "{8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2OptGenKAT", "vs2015\Argon2OptGenKAT\Argon2OptGenKAT.vcxproj", "{DBBAAAE6-4560-4D11-8280-30A6650A82EF}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2RefGenKAT", "vs2015\Argon2RefGenKAT\Argon2RefGenKAT.vcxproj", "{71921B4C-A795-4A37-95A3-99D600E01211}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2Opt", "vs2015\Argon2Opt\Argon2Opt.vcxproj", "{CAA75C57-998C-494E-B8A5-5894EF0FC528}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2Ref", "vs2015\Argon2Ref\Argon2Ref.vcxproj", "{B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2OptBench", "vs2015\Argon2OptBench\Argon2OptBench.vcxproj", "{B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2RefBench", "vs2015\Argon2RefBench\Argon2RefBench.vcxproj", "{99203F6A-6E8C-42FC-8C7C-C07E8913D539}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2OptDll", "vs2015\Argon2OptDll\Argon2OptDll.vcxproj", "{3A898DD8-ACAE-4269-ADFE-EB7260D71583}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Argon2RefDll", "vs2015\Argon2RefDll\Argon2RefDll.vcxproj", "{19D911A1-533C-4475-B313-F372481A35D4}" +EndProject +Global + 
GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + ReleaseStatic|x64 = ReleaseStatic|x64 + ReleaseStatic|x86 = ReleaseStatic|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {12956597-5E42-433A-93F3-D4EFF50AA207}.Debug|x64.ActiveCfg = Debug|x64 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Debug|x64.Build.0 = Debug|x64 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Debug|x86.ActiveCfg = Debug|Win32 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Debug|x86.Build.0 = Debug|Win32 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Release|x64.ActiveCfg = Release|x64 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Release|x64.Build.0 = Release|x64 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Release|x86.ActiveCfg = Release|Win32 + {12956597-5E42-433A-93F3-D4EFF50AA207}.Release|x86.Build.0 = Release|Win32 + {12956597-5E42-433A-93F3-D4EFF50AA207}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {12956597-5E42-433A-93F3-D4EFF50AA207}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {12956597-5E42-433A-93F3-D4EFF50AA207}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {12956597-5E42-433A-93F3-D4EFF50AA207}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Debug|x64.ActiveCfg = Debug|x64 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Debug|x64.Build.0 = Debug|x64 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Debug|x86.ActiveCfg = Debug|Win32 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Debug|x86.Build.0 = Debug|Win32 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Release|x64.ActiveCfg = Release|x64 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Release|x64.Build.0 = Release|x64 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Release|x86.ActiveCfg = Release|Win32 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.Release|x86.Build.0 = Release|Win32 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + 
{8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {8A1F7F84-34AF-4DB2-9D58-D4823DFE79E9}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Debug|x64.ActiveCfg = Debug|x64 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Debug|x64.Build.0 = Debug|x64 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Debug|x86.ActiveCfg = Debug|Win32 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Debug|x86.Build.0 = Debug|Win32 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Release|x64.ActiveCfg = Release|x64 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Release|x64.Build.0 = Release|x64 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Release|x86.ActiveCfg = Release|Win32 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.Release|x86.Build.0 = Release|Win32 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {DBBAAAE6-4560-4D11-8280-30A6650A82EF}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {71921B4C-A795-4A37-95A3-99D600E01211}.Debug|x64.ActiveCfg = Debug|x64 + {71921B4C-A795-4A37-95A3-99D600E01211}.Debug|x64.Build.0 = Debug|x64 + {71921B4C-A795-4A37-95A3-99D600E01211}.Debug|x86.ActiveCfg = Debug|Win32 + {71921B4C-A795-4A37-95A3-99D600E01211}.Debug|x86.Build.0 = Debug|Win32 + {71921B4C-A795-4A37-95A3-99D600E01211}.Release|x64.ActiveCfg = Release|x64 + {71921B4C-A795-4A37-95A3-99D600E01211}.Release|x64.Build.0 = Release|x64 + {71921B4C-A795-4A37-95A3-99D600E01211}.Release|x86.ActiveCfg = Release|Win32 + {71921B4C-A795-4A37-95A3-99D600E01211}.Release|x86.Build.0 = Release|Win32 + {71921B4C-A795-4A37-95A3-99D600E01211}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {71921B4C-A795-4A37-95A3-99D600E01211}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + 
{71921B4C-A795-4A37-95A3-99D600E01211}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {71921B4C-A795-4A37-95A3-99D600E01211}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Debug|x64.ActiveCfg = Debug|x64 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Debug|x64.Build.0 = Debug|x64 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Debug|x86.ActiveCfg = Debug|Win32 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Debug|x86.Build.0 = Debug|Win32 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Release|x64.ActiveCfg = Release|x64 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Release|x64.Build.0 = Release|x64 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Release|x86.ActiveCfg = Release|Win32 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.Release|x86.Build.0 = Release|Win32 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {CAA75C57-998C-494E-B8A5-5894EF0FC528}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x64.ActiveCfg = Debug|x64 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x64.Build.0 = Debug|x64 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x86.ActiveCfg = Debug|Win32 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Debug|x86.Build.0 = Debug|Win32 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x64.ActiveCfg = Release|x64 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x64.Build.0 = Release|x64 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x86.ActiveCfg = Release|Win32 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.Release|x86.Build.0 = Release|Win32 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x86.ActiveCfg = 
ReleaseStatic|Win32 + {B9CAC9CE-9F0D-4F52-8D67-FDBBAFCD0DE2}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Debug|x64.ActiveCfg = Debug|x64 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Debug|x64.Build.0 = Debug|x64 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Debug|x86.ActiveCfg = Debug|Win32 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Debug|x86.Build.0 = Debug|Win32 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Release|x64.ActiveCfg = Release|x64 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Release|x64.Build.0 = Release|x64 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Release|x86.ActiveCfg = Release|Win32 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.Release|x86.Build.0 = Release|Win32 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {B3A0FB44-0C1C-4EC3-B155-8B39371F8EE4}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Debug|x64.ActiveCfg = Debug|x64 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Debug|x64.Build.0 = Debug|x64 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Debug|x86.ActiveCfg = Debug|Win32 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Debug|x86.Build.0 = Debug|Win32 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Release|x64.ActiveCfg = Release|x64 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Release|x64.Build.0 = Release|x64 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Release|x86.ActiveCfg = Release|Win32 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.Release|x86.Build.0 = Release|Win32 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {99203F6A-6E8C-42FC-8C7C-C07E8913D539}.ReleaseStatic|x86.Build.0 
= ReleaseStatic|Win32 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Debug|x64.ActiveCfg = Debug|x64 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Debug|x64.Build.0 = Debug|x64 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Debug|x86.ActiveCfg = Debug|Win32 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Debug|x86.Build.0 = Debug|Win32 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Release|x64.ActiveCfg = Release|x64 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Release|x64.Build.0 = Release|x64 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Release|x86.ActiveCfg = Release|Win32 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.Release|x86.Build.0 = Release|Win32 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {3A898DD8-ACAE-4269-ADFE-EB7260D71583}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + {19D911A1-533C-4475-B313-F372481A35D4}.Debug|x64.ActiveCfg = Debug|x64 + {19D911A1-533C-4475-B313-F372481A35D4}.Debug|x64.Build.0 = Debug|x64 + {19D911A1-533C-4475-B313-F372481A35D4}.Debug|x86.ActiveCfg = Debug|Win32 + {19D911A1-533C-4475-B313-F372481A35D4}.Debug|x86.Build.0 = Debug|Win32 + {19D911A1-533C-4475-B313-F372481A35D4}.Release|x64.ActiveCfg = Release|x64 + {19D911A1-533C-4475-B313-F372481A35D4}.Release|x64.Build.0 = Release|x64 + {19D911A1-533C-4475-B313-F372481A35D4}.Release|x86.ActiveCfg = Release|Win32 + {19D911A1-533C-4475-B313-F372481A35D4}.Release|x86.Build.0 = Release|Win32 + {19D911A1-533C-4475-B313-F372481A35D4}.ReleaseStatic|x64.ActiveCfg = ReleaseStatic|x64 + {19D911A1-533C-4475-B313-F372481A35D4}.ReleaseStatic|x64.Build.0 = ReleaseStatic|x64 + {19D911A1-533C-4475-B313-F372481A35D4}.ReleaseStatic|x86.ActiveCfg = ReleaseStatic|Win32 + {19D911A1-533C-4475-B313-F372481A35D4}.ReleaseStatic|x86.Build.0 = ReleaseStatic|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = 
preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/Blastproof/keygen/argon2/CHANGELOG.md b/Blastproof/keygen/argon2/CHANGELOG.md new file mode 100644 index 0000000..0578fde --- /dev/null +++ b/Blastproof/keygen/argon2/CHANGELOG.md @@ -0,0 +1,32 @@ +# 20171227 +* Added ABI version number +* AVX2/AVX-512F optimizations of BLAMKA +* Set Argon2 version number from the command line +* New bindings +* Minor bug and warning fixes (no security issue) + +# 20161029 + +* Argon2id added +* Better documentation +* Dual licensing CC0 / Apache 2.0 +* Minor bug fixes (no security issue) + +# 20160406 + +* Version 1.3 of Argon2 +* Version number in encoded hash +* Refactored low-level API +* Visibility control for library symbols +* Microsoft Visual Studio solution +* New bindings +* Minor bug and warning fixes (no security issue) + + +# 20151206 + +* Python bindings +* Password read from stdin, instead of being an argument +* Compatibility FreeBSD, NetBSD, OpenBSD +* Constant-time verification +* Minor bug and warning fixes (no security issue) diff --git a/Blastproof/keygen/argon2/LICENSE b/Blastproof/keygen/argon2/LICENSE new file mode 100644 index 0000000..a16d6d2 --- /dev/null +++ b/Blastproof/keygen/argon2/LICENSE @@ -0,0 +1,314 @@ +Argon2 reference source code package - reference C implementations + +Copyright 2015 +Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves + +You may use this work under the terms of a Creative Commons CC0 1.0 +License/Waiver or the Apache Public License 2.0, at your option. The terms of +these licenses can be found at: + +- CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0 +- Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 + +The terms of the licenses are reproduced below. 
+ +-------------------------------------------------------------------------------- + +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. 
+ +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. 
To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. 
Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. + +-------------------------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/Blastproof/keygen/argon2/Makefile b/Blastproof/keygen/argon2/Makefile new file mode 100644 index 0000000..44c076a --- /dev/null +++ b/Blastproof/keygen/argon2/Makefile @@ -0,0 +1,255 @@ +# +# Argon2 reference source code package - reference C implementations +# +# Copyright 2015 +# Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves +# +# You may use this work under the terms of a Creative Commons CC0 1.0 +# License/Waiver or the Apache Public License 2.0, at your option. 
The terms of +# these licenses can be found at: +# +# - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0 +# - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 +# +# You should have received a copy of both of these licenses along with this +# software. If not, they may be obtained at the above URLs. +# + +RUN = argon2 +BENCH = bench +GENKAT = genkat +ARGON2_VERSION ?= ZERO + +# installation parameters for staging area and final installation path +# Note; if Linux and not Debian/Ubuntu version also add lib override to make command-line +# for RedHat/Fedora, add: LIBRARY_REL=lib64 +DESTDIR ?= +PREFIX ?= /usr + +# Increment on an ABI breaking change +ABI_VERSION = 1 + +DIST = phc-winner-argon2 + +SRC = src/argon2.c src/core.c src/blake2/blake2b.c src/thread.c src/encoding.c +SRC_RUN = src/run.c +SRC_BENCH = src/bench.c +SRC_GENKAT = src/genkat.c +OBJ = $(SRC:.c=.o) + +CFLAGS += -std=c89 -O3 -Wall -g -Iinclude -Isrc + +ifeq ($(NO_THREADS), 1) +CFLAGS += -DARGON2_NO_THREADS +else +CFLAGS += -pthread +endif + +CI_CFLAGS := $(CFLAGS) -Werror=declaration-after-statement -D_FORTIFY_SOURCE=2 \ + -Wextra -Wno-type-limits -Werror -coverage -DTEST_LARGE_RAM + +OPTTARGET ?= native +OPTTEST := $(shell $(CC) -Iinclude -Isrc -march=$(OPTTARGET) src/opt.c -c \ + -o /dev/null 2>/dev/null; echo $$?) 
+# Detect compatible platform +ifneq ($(OPTTEST), 0) +$(info Building without optimizations) + SRC += src/ref.c +else +$(info Building with optimizations for $(OPTTARGET)) + CFLAGS += -march=$(OPTTARGET) + SRC += src/opt.c +endif + +BUILD_PATH := $(shell pwd) +KERNEL_NAME := $(shell uname -s) +MACHINE_NAME := $(shell uname -m) + +LIB_NAME = argon2 +PC_NAME = lib$(LIB_NAME).pc +PC_SRC = $(PC_NAME).in + +ifeq ($(KERNEL_NAME), Linux) + LIB_EXT := so.$(ABI_VERSION) + LIB_CFLAGS := -shared -fPIC -fvisibility=hidden -DA2_VISCTL=1 + SO_LDFLAGS := -Wl,-soname,lib$(LIB_NAME).$(LIB_EXT) + LINKED_LIB_EXT := so + PC_EXTRA_LIBS ?= -lrt -ldl +endif +ifeq ($(KERNEL_NAME), $(filter $(KERNEL_NAME),DragonFly FreeBSD NetBSD OpenBSD)) + LIB_EXT := so + LIB_CFLAGS := -shared -fPIC + PC_EXTRA_LIBS ?= +endif +ifeq ($(KERNEL_NAME), Darwin) + LIB_EXT := $(ABI_VERSION).dylib + LIB_CFLAGS = -dynamiclib -install_name $(PREFIX)/$(LIBRARY_REL)/lib$(LIB_NAME).$(LIB_EXT) + LINKED_LIB_EXT := dylib + PC_EXTRA_LIBS ?= +endif +ifeq ($(findstring CYGWIN, $(KERNEL_NAME)), CYGWIN) + LIB_EXT := dll + LIB_CFLAGS := -shared -Wl,--out-implib,lib$(LIB_NAME).$(LIB_EXT).a + PC_EXTRA_LIBS ?= +endif +ifeq ($(findstring MINGW, $(KERNEL_NAME)), MINGW) + LIB_EXT := dll + LIB_CFLAGS := -shared -Wl,--out-implib,lib$(LIB_NAME).$(LIB_EXT).a + PC_EXTRA_LIBS ?= +endif +ifeq ($(findstring MSYS, $(KERNEL_NAME)), MSYS) + LIB_EXT := dll + LIB_CFLAGS := -shared -Wl,--out-implib,lib$(LIB_NAME).$(LIB_EXT).a + PC_EXTRA_LIBS ?= +endif +ifeq ($(KERNEL_NAME), SunOS) + CC := gcc + CFLAGS += -D_REENTRANT + LIB_EXT := so + LIB_CFLAGS := -shared -fPIC + PC_EXTRA_LIBS ?= +endif + +ifeq ($(KERNEL_NAME), Linux) +ifeq ($(CC), clang) + CI_CFLAGS += -fsanitize=address -fsanitize=undefined +endif +endif + +LIB_SH := lib$(LIB_NAME).$(LIB_EXT) +LIB_ST := lib$(LIB_NAME).a + +ifdef LINKED_LIB_EXT +LINKED_LIB_SH := lib$(LIB_NAME).$(LINKED_LIB_EXT) +endif + +# Some systems don't provide an unprefixed ar when cross-compiling. 
+AR=ar + +LIBRARIES = $(LIB_SH) $(LIB_ST) +HEADERS = include/argon2.h + +INSTALL = install + +# relative paths for different OS +ifeq ($(KERNEL_NAME), $(filter $(KERNEL_NAME),DragonFly FreeBSD)) + +# default for FreeBSD +BINARY_REL ?= bin +INCLUDE_REL ?= include +LIBRARY_REL ?= lib +PKGCONFIG_REL ?= libdata + +else ifeq ($(KERNEL_NAME)-$(MACHINE_NAME), Linux-x86_64) + +# default for Debian/Ubuntu x86_64 +BINARY_REL ?= bin +INCLUDE_REL ?= include +LIBRARY_REL ?= lib/x86_64-linux-gnu +PKGCONFIG_REL ?= $(LIBRARY_REL) + +else + +# NetBSD, ... and Linux64/Linux32 variants that use plain lib directory +BINARY_REL ?= bin +INCLUDE_REL ?= include +LIBRARY_REL ?= lib +PKGCONFIG_REL ?= $(LIBRARY_REL) + +endif + +# absolute paths to staging area +INST_INCLUDE = $(DESTDIR)$(PREFIX)/$(INCLUDE_REL) +INST_LIBRARY = $(DESTDIR)$(PREFIX)/$(LIBRARY_REL) +INST_BINARY = $(DESTDIR)$(PREFIX)/$(BINARY_REL) +INST_PKGCONFIG = $(DESTDIR)$(PREFIX)/$(PKGCONFIG_REL)/pkgconfig + +# main target +.PHONY: all +all: $(RUN) libs + +.PHONY: libs +libs: $(LIBRARIES) $(PC_NAME) + +$(RUN): $(SRC) $(SRC_RUN) + $(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ + +$(BENCH): $(SRC) $(SRC_BENCH) + $(CC) $(CFLAGS) $^ -o $@ + +$(GENKAT): $(SRC) $(SRC_GENKAT) + $(CC) $(CFLAGS) $^ -o $@ -DGENKAT + +$(LIB_SH): $(SRC) + $(CC) $(CFLAGS) $(LIB_CFLAGS) $(LDFLAGS) $(SO_LDFLAGS) $^ -o $@ + +$(LIB_ST): $(OBJ) + $(AR) rcs $@ $^ + +.PHONY: clean +clean: + rm -f '$(RUN)' '$(BENCH)' '$(GENKAT)' + rm -f '$(LIB_SH)' '$(LIB_ST)' kat-argon2* '$(PC_NAME)' + rm -f testcase + rm -rf *.dSYM + cd src/ && rm -f *.o + cd src/blake2/ && rm -f *.o + cd kats/ && rm -f kat-* diff* run_* make_* + + +# all substitutions to pc template +SED_COMMANDS = /^\#\#.*$$/d; +SED_COMMANDS += s\#@PREFIX@\#$(PREFIX)\#g; +SED_COMMANDS += s\#@EXTRA_LIBS@\#$(PC_EXTRA_LIBS)\#g; +SED_COMMANDS += s\#@UPSTREAM_VER@\#$(ARGON2_VERSION)\#g; +SED_COMMANDS += s\#@HOST_MULTIARCH@\#$(LIBRARY_REL)\#g; +SED_COMMANDS += s\#@INCLUDE@\#$(INCLUDE_REL)\#g; + +# substitute PREFIX and 
PC_EXTRA_LIBS into pkgconfig pc file +$(PC_NAME): $(PC_SRC) + sed '$(SED_COMMANDS)' < '$(PC_SRC)' > '$@' + + +.PHONY: dist +dist: + cd ..; \ + tar -c --exclude='.??*' -z -f $(DIST)-`date "+%Y%m%d"`.tgz $(DIST)/* + +.PHONY: test +test: $(SRC) src/test.c + $(CC) $(CFLAGS) -Wextra -Wno-type-limits $^ -o testcase + @sh kats/test.sh + ./testcase + +.PHONY: testci +testci: $(SRC) src/test.c + $(CC) $(CI_CFLAGS) $^ -o testcase + @sh kats/test.sh + ./testcase + + +.PHONY: format +format: + clang-format -style="{BasedOnStyle: llvm, IndentWidth: 4}" \ + -i include/*.h src/*.c src/*.h src/blake2/*.c src/blake2/*.h + +.PHONY: install +install: $(RUN) libs + $(INSTALL) -d $(INST_INCLUDE) + $(INSTALL) -m 0644 $(HEADERS) $(INST_INCLUDE) + $(INSTALL) -d $(INST_LIBRARY) + $(INSTALL) -m 0644 $(LIBRARIES) $(INST_LIBRARY) +ifdef LINKED_LIB_SH + cd $(INST_LIBRARY) && ln -sf $(notdir $(LIB_SH) $(LINKED_LIB_SH)) +endif + $(INSTALL) -d $(INST_BINARY) + $(INSTALL) $(RUN) $(INST_BINARY) + $(INSTALL) -d $(INST_PKGCONFIG) + $(INSTALL) -m 0644 $(PC_NAME) $(INST_PKGCONFIG) + +.PHONY: uninstall +uninstall: + cd $(INST_INCLUDE) && rm -f $(notdir $(HEADERS)) + cd $(INST_LIBRARY) && rm -f $(notdir $(LIBRARIES) $(LINKED_LIB_SH)) + cd $(INST_BINARY) && rm -f $(notdir $(RUN)) + cd $(INST_PKG_CONFIG) && rm -f $(notdir $(PC_NAME)) diff --git a/Blastproof/keygen/argon2/Package.swift b/Blastproof/keygen/argon2/Package.swift new file mode 100644 index 0000000..d3d9c83 --- /dev/null +++ b/Blastproof/keygen/argon2/Package.swift @@ -0,0 +1,46 @@ +// swift-tools-version:5.3 + +import PackageDescription + +let package = Package( + name: "argon2", + products: [ + .library( + name: "argon2", + targets: ["argon2"]), + ], + targets: [ + .target( + name: "argon2", + path: ".", + exclude: [ + "kats", + "vs2015", + "latex", + "libargon2.pc.in", + "export.sh", + "appveyor.yml", + "Argon2.sln", + "argon2-specs.pdf", + "CHANGELOG.md", + "LICENSE", + "Makefile", + "man", + "README.md", + "src/bench.c", + "src/genkat.c", + 
"src/opt.c", + "src/run.c", + "src/test.c", + ], + sources: [ + "src/blake2/blake2b.c", + "src/argon2.c", + "src/core.c", + "src/encoding.c", + "src/ref.c", + "src/thread.c" + ] + ) + ] +) \ No newline at end of file diff --git a/Blastproof/keygen/argon2/README.md b/Blastproof/keygen/argon2/README.md new file mode 100644 index 0000000..91fc3fd --- /dev/null +++ b/Blastproof/keygen/argon2/README.md @@ -0,0 +1,303 @@ +# Argon2 + +[![Build Status](https://travis-ci.org/P-H-C/phc-winner-argon2.svg?branch=master)](https://travis-ci.org/P-H-C/phc-winner-argon2) +[![Build status](https://ci.appveyor.com/api/projects/status/8nfwuwq55sgfkele?svg=true)](https://ci.appveyor.com/project/P-H-C/phc-winner-argon2) +[![codecov.io](https://codecov.io/github/P-H-C/phc-winner-argon2/coverage.svg?branch=master)](https://codecov.io/github/P-H-C/phc-winner-argon2?branch=master) + +This is the reference C implementation of Argon2, the password-hashing +function that won the [Password Hashing Competition +(PHC)](https://password-hashing.net). + +Argon2 is a password-hashing function that summarizes the state of the +art in the design of memory-hard functions and can be used to hash +passwords for credential storage, key derivation, or other applications. + +It has a simple design aimed at the highest memory filling rate and +effective use of multiple computing units, while still providing defense +against tradeoff attacks (by exploiting the cache and memory organization +of the recent processors). + +Argon2 has three variants: Argon2i, Argon2d, and Argon2id. Argon2d is faster +and uses data-depending memory access, which makes it highly resistant +against GPU cracking attacks and suitable for applications with no threats +from side-channel timing attacks (eg. cryptocurrencies). 
Argon2i instead +uses data-independent memory access, which is preferred for password +hashing and password-based key derivation, but it is slower as it makes +more passes over the memory to protect from tradeoff attacks. Argon2id is a +hybrid of Argon2i and Argon2d, using a combination of data-depending and +data-independent memory accesses, which gives some of Argon2i's resistance to +side-channel cache timing attacks and much of Argon2d's resistance to GPU +cracking attacks. + +Argon2i, Argon2d, and Argon2id are parametrized by: + +* A **time** cost, which defines the amount of computation realized and + therefore the execution time, given in number of iterations +* A **memory** cost, which defines the memory usage, given in kibibytes +* A **parallelism** degree, which defines the number of parallel threads + +The [Argon2 document](argon2-specs.pdf) gives detailed specs and design +rationale. + +Please report bugs as issues on this repository. + +## Usage + +`make` builds the executable `argon2`, the static library `libargon2.a`, +and the shared library `libargon2.so` (or on macOS, the dynamic library +`libargon2.dylib` -- make sure to specify the installation prefix when +you compile: `make PREFIX=/usr`). Make sure to run `make test` to verify +that your build produces valid results. `sudo make install PREFIX=/usr` +installs it to your system. + +### Command-line utility + +`argon2` is a command-line utility to test specific Argon2 instances +on your system. 
To show usage instructions, run +`./argon2 -h` as +``` +Usage: ./argon2 [-h] salt [-i|-d|-id] [-t iterations] [-m memory] [-p parallelism] [-l hash length] [-e|-r] [-v (10|13)] + Password is read from stdin +Parameters: + salt The salt to use, at least 8 characters + -i Use Argon2i (this is the default) + -d Use Argon2d instead of Argon2i + -id Use Argon2id instead of Argon2i + -t N Sets the number of iterations to N (default = 3) + -m N Sets the memory usage of 2^N KiB (default 12) + -p N Sets parallelism to N threads (default 1) + -l N Sets hash output length to N bytes (default 32) + -e Output only encoded hash + -r Output only the raw bytes of the hash + -v (10|13) Argon2 version (defaults to the most recent version, currently 13) + -h Print argon2 usage +``` +For example, to hash "password" using "somesalt" as a salt and doing 2 +iterations, consuming 64 MiB, using four parallel threads and an output hash +of 24 bytes +``` +$ echo -n "password" | ./argon2 somesalt -t 2 -m 16 -p 4 -l 24 +Type: Argon2i +Iterations: 2 +Memory: 65536 KiB +Parallelism: 4 +Hash: 45d7ac72e76f242b20b77b9bf9bf9d5915894e669a24e6c6 +Encoded: $argon2i$v=19$m=65536,t=2,p=4$c29tZXNhbHQ$RdescudvJCsgt3ub+b+dWRWJTmaaJObG +0.188 seconds +Verification ok +``` + +### Library + +`libargon2` provides an API to both low-level and high-level functions +for using Argon2. + +The example program below hashes the string "password" with Argon2i +using the high-level API and then using the low-level API. While the +high-level API takes the three cost parameters (time, memory, and +parallelism), the password input buffer, the salt input buffer, and the +output buffers, the low-level API takes in these and additional parameters +, as defined in [`include/argon2.h`](include/argon2.h). + +There are many additional parameters, but we will highlight three of them here. + +1. The `secret` parameter, which is used for [keyed hashing]( + https://en.wikipedia.org/wiki/Hash-based_message_authentication_code). 
+ This allows a secret key to be input at hashing time (from some external + location) and be folded into the value of the hash. This means that even if + your salts and hashes are compromised, an attacker cannot brute-force to find + the password without the key. + +2. The `ad` parameter, which is used to fold any additional data into the hash + value. Functionally, this behaves almost exactly like the `secret` or `salt` + parameters; the `ad` parameter is folding into the value of the hash. + However, this parameter is used for different data. The `salt` should be a + random string stored alongside your password. The `secret` should be a random + key only usable at hashing time. The `ad` is for any other data. + +3. The `flags` parameter, which determines which memory should be securely + erased. This is useful if you want to securely delete the `pwd` or `secret` + fields right after they are used. To do this set `flags` to either + `ARGON2_FLAG_CLEAR_PASSWORD` or `ARGON2_FLAG_CLEAR_SECRET`. To change how + internal memory is cleared, change the global flag + `FLAG_clear_internal_memory` (defaults to clearing internal memory). + +Here the time cost `t_cost` is set to 2 iterations, the +memory cost `m_cost` is set to 216 kibibytes (64 mebibytes), +and parallelism is set to 1 (single-thread). + +Compile for example as `gcc test.c libargon2.a -Isrc -o test`, if the program +below is named `test.c` and placed in the project's root directory. 
+ +```c +#include "argon2.h" +#include +#include +#include + +#define HASHLEN 32 +#define SALTLEN 16 +#define PWD "password" + +int main(void) +{ + uint8_t hash1[HASHLEN]; + uint8_t hash2[HASHLEN]; + + uint8_t salt[SALTLEN]; + memset( salt, 0x00, SALTLEN ); + + uint8_t *pwd = (uint8_t *)strdup(PWD); + uint32_t pwdlen = strlen((char *)pwd); + + uint32_t t_cost = 2; // 2-pass computation + uint32_t m_cost = (1<<16); // 64 mebibytes memory usage + uint32_t parallelism = 1; // number of threads and lanes + + // high-level API + argon2i_hash_raw(t_cost, m_cost, parallelism, pwd, pwdlen, salt, SALTLEN, hash1, HASHLEN); + + // low-level API + argon2_context context = { + hash2, /* output array, at least HASHLEN in size */ + HASHLEN, /* digest length */ + pwd, /* password array */ + pwdlen, /* password length */ + salt, /* salt array */ + SALTLEN, /* salt length */ + NULL, 0, /* optional secret data */ + NULL, 0, /* optional associated data */ + t_cost, m_cost, parallelism, parallelism, + ARGON2_VERSION_13, /* algorithm version */ + NULL, NULL, /* custom memory allocation / deallocation functions */ + /* by default only internal memory is cleared (pwd is not wiped) */ + ARGON2_DEFAULT_FLAGS + }; + + int rc = argon2i_ctx( &context ); + if(ARGON2_OK != rc) { + printf("Error: %s\n", argon2_error_message(rc)); + exit(1); + } + free(pwd); + + for( int i=0; iQ6`gOJkiS4iU^%6A$C1t&b847iegtXl|=krs0>x& z0FA+G*JFFTNqREFHS2f;Gu?A~d~bKKj%T;$jbkF|&GRAO~BpGJ9SbFtvZ+B3Q z#4}tYzN@@PWW3VLUG+l=H{8f{Du&kZF;&cS7$_< z?i&0gPx$8P`So_~h~r}yzlzd3-D*8uZ`Y>d8!pS>Lzi#q%M5+fwF;=B7_O0S+GpNE z`g_x7gzb7d!!_v4)6=O>ON8F;0G*$PYm_$?>B!IT`WWJIjb7gA+Am!%&v5nT5Ijjo zVU)EAlV>PV)k+;{G2%^%Nrba>f}-^BKs`*{WV~z+rhop^OMmp&J^gO@Ft&5!y+ewx zziiH9zrM7zB>%EemzEX|FD)snS~Pr7=4Hb#8+A#g=aLZ$$zq>jd7|DXt5U59uz=5- zF*hmeV8G~B!0P!lAozoxAQ8IgH1N@eCKK4qd~A0alIe%qAz+bJq>)!Y2ewX zfuD04_=?lOpE?cvvD3icKMj2OY2Z^&!-uy|gAepR`7`e{@as+k|NCj+zda2+;WY3$ zr=fr1Y4Gm``~tkapF{u#Dd#9{tun^Y150(dvZW_nDfbpv6wNK1U9_mgt5kXmOUjh; 
zDsQDSdv>LFZo&N71;z7c&zoCPs?4h>D&o)FLLw9t&#h1jN<9Vhk*K1y2&wdpZuT$> zJx~CAZ;`TKZYfZaCwgE3Gp_K?UN9GUD6puaLYY@yQBvle2ZVw}b7#*hDVtkba(j_d zse=n-@&!c;DvP{I(W3Iwl7bTN?EKq{Dm+M2Qt2reF%sEUcs*663eXD5ZzEX+3(Dz~ z(Phc(>X#`K+^%uivqxMqVsw``V^o*7+c&b?JK~aV+>x2eY}fSM*&w{=rjkl;QN{G! z>{3rz(e%0br9~j;rUjldy^gabYM+QyF46cGgV!W{48?Z&2#ph=7$sh~CV@*V>Zv#- z1lQ2JwZFI@YAaqjq{HqL;W*`#9!@SPfvSvE5+G+x^PGr}QO?lARp*vKG^3UC^zd2{ zAEjJ`Fdt(aHsoo)Zs+XK$5{jZhz?)dOa6>--CHt0>R4l}A!JhooN>m}8UY_<0O9V4 zfKwM4Pg?|BH!(z%(H}Uz^ibk@e<1uvMkaK1`s3l%Ss-G~5paDd6Y!)6xNZ^%cuEAE zWf+e&0?smwCp7|Y@RV`s5pe1dScx6ag=ffJcr?iz47>MBrCPz&Vx~&&mk+nFbK<>InE*5%32i;Acm` zABljcM8F@9fa{_h5&OOOdk%chf$uq>D2FWP9aDXK&1xXF{#8X$YwNsG;a1hxWNsAf z36Fja;PBAR__dsGhmYZ6#)aC#c!oAG%%LUJD#9-@%poOIC&GVYm_teESrPsV!yGb0 zkBIOe80L@?S}nr=hhYw#p=uGnmthVqp>h%aIl~-MLUTlT8N(b(LU|&5JHs48LUs|Z zV3aCm1iQW;mwLn5N#f zw-Zr??!Z+;Q{i#@*V#0)`~QwmC0V z19v^!N%c3Yey7ck4!y)E+}BHJBGxeaFtXtVlt$V=Z}!938LF(=_gAN1Z!s z!+3v_(=zTez@1r#Ecd(u?u_?;<_>_pKD2?!a!)-H2i&$%wQgIwrS?^ZoVN7-PTMF; z%|dWQ`!&k~iJ7^zy8!}%+rLNqC7>?6qKnY4Z<8PP0c#1FU=nGbd% z&lEIBGFm3d=|AA~f2Hlbgrc^{r1~4RcSIQ*L?Vx#S+0L4&{(qh8swo)#m~i4r_IQt zjJuIxu?ol>=4tilG7-Ttul+7ebUtX)GO$`6jYMPk(huwnmoEr65)BKX}@f9Rs7rk#f_a@#UD z9G_Lc;rNZQ=lmKa-v?w?oKl#|ysfH#pSB(*8j1TcJ6mwfC}g9ZE6UlY?K&0?hqj{W z=B=JL?~-o$q!3kur$Q#!QFNJ@0B;YO_ysc1W#Xh|#b88J8vIRa)&?mP6ok-0Wc7{w zF9lG~wy<}$S0LLLAvQAatSy#hu|PqqBq96IACaQO&;` z)k(SQ7^qS#EA9pc$VnzS$sh+hVc#m`-mMcZCubsbf;S5|Z}!AFqZf{)_9M_{=nWL1 zgkAtu8Ak~6du@Y5k5gt1sksl;q|^*1b!h2ixS<$w4S~xMXy0k*zzBV3=s!b83jODJ zg}M&p4i@THE0MyoQqa2*;+l!QHemJZeND#N`?hT4@7$H>rO^^Z#RGsBAwPIfR@_bz&2FeW|5NG zI5_%NAfZu2>rpw|wWnn^c4lJ-#&yi**LpTbkQBTTya-K#3?YzC846u31EWMBLk8?( zNVOq=I-XjMe0JVWNg4a{E^uAjb{o>w2x%B|*3S{!h*)6$nEn@}e;EBwN`EH(55wFHPXM0{zdR>f9wz?Clhd!jGRpW_0pe4{}k!BlDcf^zs(Q-<(rJld6u4%k#HDIgmx|i_7coPqDr$BvOZiCpuO@tp^#33F zH_+dm{4$x`!X$s`8TCUMHJDNNAgWsxiSdc{A4XySmK#<7F4ebtf8O-*8Fd*{ig$q{ z;WK~pyrLZLP6YTb){wr~pVP_e^qvXRaS4#1>%9r#Hp}_dXkw*NcY!}w;E(umRJWY+CL@DfJp*&NZJwaZNKg8z$J~IlO4Z02s&C9iz(i|)<{hX8 
z#vDN)d}a!MwE_^kF?JLXO*b@l(~ErUN`!u7exaLytJ4R&%6}XN@XWxZ>49l^8;+J; zKdb4}xfe`wo77;bEvZn|%`IV#_&b6zVxcHFD#%)*6O4jF$ZA!o1PHN=YslEzam4b> zeq{Bo>i@Xu(~=A7O!aM18xE9i0<0~NJ#)jcrEXhF)2BbZpx(6M=)17Mvg$1pHnB5n z*I&sY#d|gf8WjpQSt!`5RE&f(ufL&@>L+z1QK~-TXy*R}s&arfW4IXR2efa}-am{1 zp=-Q>Y3QBg3rBe`%&4mkSBzQf;YS4ldECT(J?JK<@Ow408Pj!S!5n9Tr6w zJCSx&PnKu3_Ga-{-(>Olf5_s_EWu)1FBaQ%7WWvh1v%Q948id?)gaZhf1lv_&S5&w zUs(G&1gia6ZT2C`RS$XVw%^wRCk!M0aa0@QjTyt)0C(U~F_5Z(hiv-+hjD+gZrnd* zdk_G3;3*r&RCiXLWmz5crFvzJtqlOnvId5-c376R(BnHEZCQQ~d=5Xl*e7m|=WYm8 zip&3&HUV_0erb*U2{_>QbeoxMbK3?$0{7JcGRrogsqwZ8Ho|_x)cbMv1 zXA=vYUTa{Lq!bhrq6Vj0Op{QWOsx?16@vohOOoF z?~kx(kQT`t>vy6#wO7)Br5D69{>m`i&-Ozgp0J#+IM&nDYX6;K|2fuUgbTOd-5tgd z9S+@rAEEag>rvNmXg+=%>%X9vvs^6}M9TQC`Ct!uf-F&nG}gf#u<0`Ns4hc~3gHqm z^b|SLB|}d$L>YRX9w9?_z~?|0j+7yXzr*E!Q&Y$(U5qGdP4%~xZSR&MN|2BpU1GXL zh!RHG+F+6s&~6#>{u^Y7g0hyAQXx#;a#UxMf<*6)*k zxY!j3XLRBA-*(BzD?(m^-C-dww*yJa!z)5wZbsPgijbGt@M`ZM!*7unL-#}|>W759 z7|KYxY63=)NBlG)T)DLas!oQ{;GGKRf4KV+?#?FDMD;s+l8~eg;W7RY|7qoL*4Ku-^i~e*LadWe4!<7)9zZ#~r}ZHhRoC_@6RY+1eXF}YMf<+m9TxiF zneMRA2M_D|-~sIel=lCKK4@jd|22IeR*e5wRKiHu%-?1&a22UbE9Aovj<6U0c)o1? 
z4=^|ALo0kYd*McA5}_3)>zOV`rZg}AZT7-r$nf8y5B?Q<;SsV*mzRIZUSPZYr&U5U zjN`uTg(p5^wS-Ez`$tkJHtz02DE>3{!pkT+LO;yFQi?H0ord+FI?PbTtO--p%pTvG zXdydZ#o!TkWo@@Cdo>2DbN&-*ZP-%tgz&%X@V}wCSu=m5E34VE>|TJe?&hzti8-&F z1Zpn{j89a6A3>J*w4nW2faHWRml^t)k_(r&(}Srf=7^s={a>iTb+&3Ga{8YWrt0{N z-OPtWtagoxM#rZDuUieQ6IpAsKP8)G673oUXmd{#z#3ukN6hzr#FBzVJFt3fEM8+B zX*E)>MD8?}t69nkrbdwr2nxT5W;(r$GMl8qQo9Iza`;YSnd1>4U`o0T2?F=oTH#=_ z{iy9jc(?+z%El#DvFu^z?AeYG>Q`zj{Ds!^8e z>%7{s+=FhX`j2H`F=1bwZ&#hrC;11Ks-6L+{I~B^hYpDOSoPJ}^ADUz9zt~;0$cKJHbvn<~Y7|2NV#0<)RyDD{PO!CK~sCt%bZpWfr<$EX!-n#Et zl>NxY^DQ+Ckgv;sz~$T7w&AF!esVCTpF8bMcko)2do0ni{4T^#1Q2DLB@4`C!GEhb zi=~D;;8>2!f6(RIwSU6_P@QXP-Eg$M#m(y8h+>b&-HL_F*W9M9P4%}giAmmYY)SGU zG)Dul>XD+=YIu?ikC`6HK~yW=Qggp((9b(vrv2HLHOVpKMkJdOT3x|BlbUs`@{Vzq zHC~em!KmyJ-WXHDenN|Y>5Yj3GjanhR%hdv+%`Z!-0J*Sgqa~GD9LMb}oPG_>2}* z$6BX^$_NC;_>8ShpWb5Kq=eoEFC*{LWb*8w=6x&DzgC1~0nSpHEj`CoGwNroQM)|0DI?i+R?!)psp6VD_r-XJ>ZR z_hv}Ux7T<2dH-B}g=zUe(7q=w`1bm4g1YZj->_}mT5A7}n#h5WM_%wzlVdRLR~daDfa8&~IhVuza)ew#JH9p#u*pH?E*Gr^ z#uu8>F8?todwqNv@B=r?olQlL@nt3AF~0o37+((EiSZ@+zcs!zV|+P@9wx_^<1XK) z#`y9ch6juEKb1iGBs-4h{gR)37v!%0dsMG3@=kLyev+LlF%svis5A{hL^<&(Gr>^pcr1J zVt84c5F?>K7ElZ?Q!$t9_Vj7AoM~+1yVw=zZL%*sE@XX=Rty_c zLhkR;J63*&-cgNKj9g#*f1rPa{+Ig4^6&cxdd>ep{}?Cx$9~a2B6>%3U%jK`8@=Pr z$lmcnukWSb>>uZ1yL)&45Z?kggk-~vli*+U=p?>xbdtq@L=GW6zVCkDKfWJ7{@vrp zf3|-d!>7-P{sCc!wC~Y=jPd9D{=u~bv4GHHsla~ZyVzIXj~}$U{y#Q;_?mz03;)=% z!i|1_EljQwI0Z2;siszLO|t_QJ8rhvo+jt#2w&POe6+A<{E(ua;d6T7SI+4fe@CzI zl3wAZy~0xqdZxdnSNPYx!tK4v|F2&0Exp1!dxa153g2*J&+_N=3eW2mp5H6HpjWu8 zSNM)z;T65YfnMRedxd}1EBsKe@MFEgPxlHx)hqn_xjpM^>c!7K!;q8y#4}D~_)j|g zM=;aHYEF{Fzxm~Oh11-lriEqxecXpKK#RJH)7=g=cyWq$1e^Mt`~=XbwF~ba)zqjx z0x$M(4p2?)`0Szm0>M|gk3}_Y(3%jynhW*>J7benQ`eU2-uoF}Hp_yeyel%WWtauG zY{uqcPS#Oeh({Q@S0}oC#`iISM65=Gw_awAFD^r+VD5R;ngh>`ze8o zvqB5;UYDc>#%dS4uqb{Vb{}Tcfn0N=-LmG$OG*sZyedG(yw%eGC;IjE;8Ut69!t3D zzPS7Yr_`nJ1Cm4itnQlt2QcY)a=6qBARGdPn$CJbLVPL``P&l5PSP6tf zpm01nD3zG9aOrq*Bta7dElZ{2$>H)dFySz{a6CB)5(tMt;dpYe#7jPaHK(_aGN 
z5GWi^&HxF7L!fXxIRhmS4uQh))4=dO+9iM;$u2ipYMDbIJN0ylV`XA1 z6Za2HLzC^%4scC3JM(l&wjvtkr8CK(z_d&z;dvO87gbdp$W~96@Ygb-T`x;z!dk#y zvV%sq{|Lj+pTo?1eWa_Jh&>bhIN9}uu5opsGE)uQk*Qz=Tee0v2{+Kf8iIw zMSwyH)`q=im;W#-S9Kw4Kybuo#-Bp!_?fY*3O6*m195>{Q{2H)n_Xy2>?1{A8Ks8b z?qUWYmN;LR-D1>!_MIJ6bu#bJN?_;{t$Lt)$i)3*6%?f4Q^9&ulcwd zFsZ(JvpOOh^f78H4oezQUHLwN_}sw7wm|G`;Ec!7#3Wb!?tWa~ zPX#izYg=m0hX!=l-@pk2}ITfl!b>lUyW?B4wIN)!Mu>Zyf?8)P6TdFSi?{}uv;dF^_2aYubu@}Jd%rUioA5wKzAC2~+ zq%GkFbpBRMz5EA^j8;3-nuE@(PX!lW%@o`9a@sc=_9#-QEWmqV(3y2A=*}Wq9|Rfr zq~JY;3dZuxYbd2Y1PV5x0+R04-_+l}rfRW?;g3+BfhmJ2-VbuJ=s)gTYBgDEoDfCq zWLZzc8V3`w^JJg4re2&o6&pV81oCaC?fdj{}niyTZ*b-^LV2)>{@Ix7i1>txk?Mj;!X2A2|KT z(i(7LdYiU&2*<6eaupe13`CY0TYo;?Hp=BcZ&L886S=VK>p!>p>(ztW{C@W` zzJ!N0QyulkVq1#V@hW`1_s#?sk`bw7MC}nv&3d>QTV22u4~I&OR$V-CTGo`>tYivw zhJu9$+bAbAN9y>DBleZ3ywTps(hdZE1J9 zjZ1Tz)}!k-pub0y_$6#&1g(^lE3;AJ>i}eY(EirTdk_)&9k7`+h@@gyLTD8r=;}h0 z=R-9xcA zGLf&#^Bn`mC-hI49vrX%9wqBF%d$5_teaB316~UKyKu*VMx5N`9Ylc%GYRFUCLP)g zFD4(wC~J=5U4rkrrgQ3iZ6PsraRw%&jAw%Dk;PgZ7zkt2RfPS8s9Ms$7G@cv0t+)K zO;d=C?dIX$^Pj*Jl|pm{jCng*pn)sh*y92gw&Jx*9hocS3cIWe}2xicc=l-Mq2%?QfLuwBfT5tP$l zyO=2>C?~*nF-JyFPJQiSc8s8$^kPJn#mecf9Z*4qoaovC6+vOzEVTnFf^u?e7e)%Q zlG9o{pdwaIXzhTCpq$Fu0Tn@5IH;eUGg}9{I)qqxQN@qjzYF?S*dW>t?1FIwa$wj~ zD_Wju=H4V2v99Q|)ZlQo_rhElW*gC@@-sCzK!e2w{boBHm)b%#`WaexSo6)C-@d_9)IPM!4}^iz&61| zxXO5pAkN;Lbo<-2fQ3s_onWKRP+ID)=s0jPUiCK!POex2WyI;69a8J|YZ3iZ4VbZ~ z4{rnJ1QR>etc_KBLk@HzS1?F}Q4O4-`WhvUKM(QmRqb`?6fV0F;vR_2x?Si;i`a)K zTX-Ii*l_ZM9J(e?Rlw*mankm^NFCVYyrq~_auOC7+KG<4y0?DN_p0p8i|CCwKPDz_ za*&dfH%ramz%|vsFx>@}JSVUaYZu2k72^WO?Z*0vrDid(0@G|T&p2%@Ly>_qi*d{v ziz0f3Xh&$jg02$hJ@#nt!)6P_I*C(PWkta~5BiSB3P+xp9SYLdQ}~(nH002}1^cf$ zv_+^k)@Q__L9s;B{&Ls&BXm>yliD_@C$eUS`qGwVpZ|5~HHlNz7w5m?IR4~&zVlr# z59_?|RK=mPtFZdly@Im-wjoQ-IOmPF>-BxWb)x@Z!UpNaE>4SkHJmF^S2Tjd-Lw430TRF0Asz16!6Wc_wGnBC#O$1@j=h3rp<`l56Q=^YY zSq=U-CM}Q)l49$$_VK^PYlS(>uAr!XGJt8)MuVyP7^dzJIILWqLO^<6u)Z{3$q_ z`=O3EDL88=EV)A4Of}a6adEuK1UCMeiU40pyJ#)MN`rjt{G?lVE%wMp)bO#(eBH+XRj%fteTxI2Zrq%-!p 
zv30myq#A1!gjC77rY3jB&ZdIn`|E{?q)rKrik=*JNKoR=IKUJwDCc!h_2XPnlzn{v zjwC0vY_92JH#juM8MtwQAv%ZMWopVbop6Klyxfd7rmVB(nws4ioh(mF znHZ=Mj6x1pw@D+qj+O+p+>DbGqaQL5Y)t;;Sk-jgm9fKs8(D62XLLx0$oQs46a_vE zjw1`~G^H20Gd>|o^VqN8%zlU%`= zv(=1xW|2oo{n)_dbcR&g+QshZ`$Q83CXbpDjJe2-PMq$}sB=fx=-^Byszdf0b2Gjc zl}d3(KPn<~O`5pr2KWLAvLcWgkMBP@)*V3miPfbk!K;#;fq7RML|Y;8=2P4{)eT_w&Ft5}%ags|JcO74+ICaXpGKHDR3 z+3gOo%*PR6nR#yH&?zvUvaKZ$DQMggX@`!a>LYeEfzJYn9E8?v~1!1uP0mZU`G@#-wYvTMXb|YVGQN(h*y4KKT>PSdsyt>ve?>lJ2D5O%7i3!ZI@~qi9y&uAw^x=rL;%pSOKwi>rLbX!Jd$+t~Jy-Adt)n z>FU}p1wJwdYHdPhFXE)h+QYt<-+#ohk2+K$VpIg1MXdzmX!onwQ4daxwXC=oA;CtN zXwdJtrE?`UJVFiycgb%!x)ejE_$?<8pNJG?qtiCEE743sIAVwF1+tc$ z-)9)V@oa9Kq#$(=f@FC=#$_#y>-Pg9dX9D>K3oXvu=-Vswc<6a%@bzA+6cqVqbpd^ z{(|<@f5AwIv}b6N|Jjh2-@{{!kG$F9YkdB~ZD|lGIptcKr&wy|0LdA6p-q(jHq7^l zLFeUjs4A#L&QixzBWB@cqzBE-n+vr2QL4lAkmy#h0DMiU5huobw2$Kzrk)sFeEA$# zaMpR%x&D2*X&r9=G3^@UfnEL{On1@K%09yMkb~GJb<9%K&++jJ6J8k;`diSr?@ zplFe8&}SP3Wk`uevZIk=Y}$4#!8vdl9^IHz?9on z&8a&CRdc6x<_4cTo5-p^#>V+*AO^XWW-1N59WqPNn zF@)OybRx9tP=0P!M^yv$P73VBYBl&&tjIO3G1LfaQZ$#?zTeB<%c#>QV#a+gv}p9* z$nrB#50UN!rMj)hgX==1dH)NgT4)KbB#F=w)``OKB|J1H-WC0SV89JHg!%^n3y=8g z2~D4daL$JCeg-^+p8SJL%p=eSx1AYd6Vfkwg;o5fi`Cr$Huj{*o*-0^6nyy+FS38Y zDq^{09Voj;2b%wM&+jEdFQ5&?t%Y(|I9A*u8UZ6qx2;G* zdEo0HJec*0uJt+m0ijZ9a%VMImTe=NPjt2x5!pt7X@Xt#U8h3w>h*4Jsr@Y@nk9vT z4dOs~^qjQU$ed693I!0XMBn^G`xH}rve6SA_$74=`IYYWcj&B52Z%}bB8mK{h*MJO z+8a2_r!(`8IohiX*_jv_{tW?X%`fU!OFscu+w^ZeH^86=aZ(S3u0@{^QjJpDTQnQW zk@k9|MeaYz*$6fYyZqbnRg=?Ue9|1HU+?i8?)xL#-h`Z1CFDXa)&7BPTJZ$=IYY{sETs5j#} zfucoYL8q%*%)uca?En^cx>|EL15C{tZ%2T32ie_icTA+w;q)8L>zK{+f^L5c_L}Mp z^kAw-PSw5&71kcl_63_A>sYz=-%FRxf4?^LNjASz!F(;E{q=d6o%1GWTp0SeAu6IP zfg%xw8?|_Aqq{%w(D->Uv#!FLk;@O04;B~(BGq@wZdrbQ43@6#qEgLTRHxpItDv%Y z;lU$_KvUg`AnHn!nMq-@G~iapIhf_g$f3-#V!N3I)rdkIuOL&!v5vpQ$FQ)Gh9zzv z+T}KNXixryyn}Vjb!rtI%xQW`WUl?fAfP`9Kz7f9=9lcq2Uz+zW!9e5d06vVg3x=Z z9*O7*!YE>S!R$6Q_>fpCLQu`$vNUxmE)&(oI&?h@$qHjI&}(E|}nntq8Z6NJ|I35BPdX(O0(lg zQsEBCnA~7lU6;)y45p(nm^K{UCKF?*?(*RNLhVVc 
zQS%GDl~cHMv>`=;59hy`5cHTy!5FK{zgbFE+9uasC#w<7pw+VcL>%sTkCaDdg|32D zfWc&S`S$iQm^KT8X?s)SfeV^Jl}?Oi0~w-Qxzo_izJQU%LKZ$NK7Y7^%f~{hTz7p{ z2jHu7@T2KOvs5qp5hs`_HqdY}s&CH;zj+)WdjT2ecUw>3W2P%OAjS>dkcO*q zLs5qw3;l__>iS+lDc1#`->aaap+i3JCHoKHBZH{VFxIDA_6H;A#KCHc5cC;j|ClSd z)R6rZ53z}KiRb93+5vFr{YzI_Ts(x{(wvV`ETIpaCfSz+Ay_j%rIj4%?%cScKuh=t zUCMV79H)Z&bc+|8YMS?QV<74mf{;14?^NjPpB(fD1>}wAY=comj!l_&{=D z3Hznqu_F8B(Omx(Mov5%Jx+=0DLblku^qyIZ<~3+p1fCikAP zv#@KX!d?=~97g|C?>bdYM$7(rHi+w^e;$EJAb1!CXE#O&In;0P zo{>@Ku&fysXJ0ugUJc{2Xsu!&`MesnlPx7?ph)p7AVLXJ|DA@(OeLzLd!VMYXF}{} zvUp&wzE(Pe;N3vm0MzI62fU00iRU5z{cB6Rhy(5&MuFRZeC1+kk) zm3}JpImQqn9?sy+HIj;XnCSEZWr1Kd2wR$Rn-nUh>`2lpbkgqvfy{e=k+=C&Xei3l ze)_N&Olu(bP%HQp^+SG?I!pCq@3 z9oM@2M;$XTcXmw2htCzxWe^KyRPim z?pQfB3?*S%pA?+6eqsu)6lizsLtyN2nb;)h%DRgM)vCJTdepUxO_(*an3aA#4oV0xrUb5sd4D zE?;A`3s>;_nh6fKIB;)r%M5%lGMlDl9BKFJ>rwvwzP7QduQ@81*ASe!IjEj+2G!45 zzjjnVWFGQbP@OR3y{27r4sLDQm42`#0RV?{2rjLF*I(~DaJ6s$*pBx)c7PGUuh+~S zTRK{OJBRt&E=|~!(6I74ES9lXv||GC+;v1!-Hj*Xy!Xs8{4sitN=iA;cQp%_Na~VFk!?|2{f+ke`umgtk_~< zVWeY3mTGUShh{QoBa@qQ=PlEbDWkgL$p~X5X=dg_GUEoM`lS*NeZF z&pB>?eRusP1(WafY3ZtItNNg7YN$Bhvfg1*!?;Cet1~#W)!{pyu<**|v~n$?YOMpN#VkxMP?4%9 zQHcdrk5w;;8dLSKPQ1fkN9tF*gOfr12dxB75V2m8 zL@?;gb5!4{Tx_XT1lpbWiy*aM)4Z*|lN|qoUd#INWVxi;vf^F9+(B22>*a2;-)xUV zd1zp=JDB_`+QcB(VYwHpn7>CN)E3q_>f zvaAy`9LxG~;OuMe;LRXZmyHLzI7q$_*6w6mc?mnGk$b^k58~MuB@+d zjS@-?@$X(>`+Te`>kUiID6~T8ApzM42ynI%@^cA^QL_$MYStkTT7(~QrFf_kJ}u#a z2zw$@ABojH-`-5zfB6y2l+{bh0_xcIy~?YjOEM3(T0M9Tw^KXcvpTH@dSrExsg? 
zw;eOi%4lDw*|O|>(u6iw{nMhc>KIs@+==gbXLYK}F4Gb0BLk^a*|g(scCP^>rSr% z&>gHisRjq1B@5bfA$F@qU+k4-xV0~kH_Ny{lu_@-9}@D#>17z4@I9uNF-9+=7G>mO zD>r6Tg6HqB%;4ZeSwszr$f!dhH?BFxVnY+pvY4Q_!hzgWoD#G`(_vDq zy?lfs&aVUxk@hRDlrZh`45n?4iMGK!!p-@MqfLQaOw48|+KR3;1A)UCPin-n4gO*h zN1IG8t#|(+8NGjz@fxV6im;;l;0hVr%4yM$^N&;^=&!T9Fg@nfJpc5#Qy~Qa6<;r1 z!O0Mxoe-NP(f;P#fO8Ct73bAboZd2Z=K4dfw4+j-EXy*GN*Ae4AyUbv$^K*$MJjYA zOYCufbGNucnVs3UxaRu5=n_}UveDi7_ZHdEIep|GgYJ+W?QeAj+_A{t9gprXNz_MN zboZtS-(@uHVnF|3n=Tu}{C9O`;B@Li-$wJ~z~Cfwi9uQKg8m`j$ltQ&Dw|{F62ZXB zp|bo~LwLa*3l6%$!J&ZktZxLrOAYktL?^?4CeZDX>+O~`F}9W2(MKB4K>XW6{;kO0 zc_tD#&ps9U#W(VIti!r=IHZXPCsr1cZT>o_gTVBd6DZ?EXc$yt#QG_k7bK%$9bnH1 z5Pc$4P7K37G1BAY6r!2I^NjAAN(EwU{;lAzBj7yq1ZX-DI!T@Tt6#9#vO3O3YufFfczRrS$I1Sv245~Tk;Cs-1d=fIg1VLkpy>001ns|x%vHj$Lug#qIkTL7iAkLPa@()U<+xQCcH!VxEL z5#n)Nfdd!Y`?#@~IKmMJafu^C`d(xYcN-H&IO1F`ab!r}pzPrmWa0=%9MdI^4C%X; zJ=~W}99a?R?Bh{hK@VKgBkRi3yR4TC@no;SflD0h#AQCh5odZ?mJGo_kT}?$OB~^d zBfi9uA$(tvIM}mG9N~x)zr>LtYyprs*vLy9;fMpk#E~HZvV za7!F)`6Z5U#PML_$dJCf+Qa?R#1W1-EleC4a`G4%%feP`;s^(aheb%9C`P+mD1*U! 
z6d^;$#D^w*L-e`oT@4CUvVcuIDx7Oh2&W>Pf9NwcTXPC$$5%4h@%+lK)$BR|;cU`@ zr}7Vdsb;qU2&YvCp5(^P>{9^3*$Ch}>fuBD-PX>tdE1VN->-Q{?(i4<-L@a7CxOYyrm|G*5D`zTJT75qx|ih6h_&--nw7wLBM6xg=5{987thk>*~-PW0Z z;0ATy2RwLJ(U`&mWZQ5$5x>p(2ga!Tw(+6Hxyd_aA$cMkrOf~q#bhc1qG^Wm^)v~G?*uO9w1f9hwXhHbh# ze=2_A2LpEHPX$L&Xg5uQaXJzr&^9_vh(K#fj><~5WEmVq^*|1IiRyvmeUhVyJ}pN{ z$>%~Q`sC;@^AA0*W*>6qSHgMyE72(LL2%JFAtB96D>}t7!imQuztMUp)ulf>nt$jO zHTyNBgVP`}Z-zV}ZsG~ibeoV1TCnXak$fB^i*aBnocyqgQ#Z8>FeEYT8J(L-yYB%G z4FaG-7XuD^NxxS&FPhnS@}TeF%o#{xvX9ChRxi7q8k>Lc4fV3H_^r#YM2Bnv0-P;6 zLPGvEKxh#N;KpB3M^pfgfzV$>eJRj@@U@!#A##AT`zxNoJlw+HZAS&x2jX|P5PUT9 z*P;uZR1d#K3b#QBi`0FeNa?mlAZ-$Dv6B)~(PYiP1>)Bz+Ol1jQHTq=-YtJs4>#o> z_^Z0DDMEH3ArPPqETN(12+$<^*q5TsKR}zKda{ylFV&^9l8!38odVRVnKF`6fksXZ~!3u){9z!qo6)4sNZ35bn5d=fz;^r zTR?pmM_FG8;@9dn)IoM=(PpgGW6%N{W-4maDoAb+ z9a?hqeS@Qsgp;N=AxLO6wzcfm7yzIkWVePoI|zA%Wpaqq!wy=Z zE%cjImP&ihBMmr2!gOOwILK8Ravo{9ArdwlBjGR-4v3K^8zNz>F%k|Vh3V$ejWlu4 zjFG}fTPkffk8YibV`q$%HIc@eM_Omd7WNn;;V@e`EEDVj2ZSNUNCX)vpdM+DArclC zBjGR-4pa8mQ^I8=97YPLM_OP=37dF)-ZW^kKL&Qnt5fcUpkL zI2RT>BV~K{VX8Ayws#*kIwNI!_hFnf5)BPUKVTSXl`~S-L_cmAX^t~ews#+PI3s1Z z=)(YKq-^g#EN@22_U^;vW~6NIK5T79%J%NV$Y!KyXpf2}ty1?*kqvA}fed=;L%+jL z7Jk2f{{aFLxXy-qZg}@c%`dSs*Y4U~)2+r+M)|H_WHv z^xr4g3uLuCQ|~mjn;QK)oWX*yqoczSben5S%OeGHvTJY?HeZ zhqmSSMcz0ym?wE_dF+y(PEj3f8VIcf0p0WMYHX5H@C6M({AEIMdpbYRG^EN&@j>!$`s4Z`xe$>CdKPO{YOMMm6+#aV9~{M<*7 zDzeq;v185=-Z1{kvgFEQ);ipnu@yD*?{tKlP&fRETOg;Co6b$B+^0A%j+!N^fehq0 zirbPW^NT=W$zV>x@yP%z>*#<*hTXVeCO&M6KQZ59zdn2RA~ae=&mM%9;8xLyD_^?# zA=-+6fBY(}m~pkA4QRAmh%5X(^?|5gMvJx^Ck;dAfxKS(nFQm<;|{VktRj&8OTd(f z^#+~)+|7wPQ86uf9!v2e-Qg^x%Cs86n}x8b5}RFUk$ctUoYhsj~biZ91+#bJ%48H zJ;ZTYP(?h;SA);l<_rK{3zvMu*f4{D`0TGO(RbP5k}Bh-DwJIesPTQtXe#_E`_LaS zN~^(jGP5JvfxqFPC-}oZYc&~vTl1C+a%?8c8gy}pR1y=SxlvG!NPq=*AdkYA($RH1 zaaAseSZ7-VS8gj3fErt|aPfy=TZQaAi9Bksayt}-3l?z0^g4Ox&nu8C{5|0%J!8NU z?H$1Q7tK&k1P6?S1{1U;IEUL4O`;=z<}w@6lvkmGLVv{Oht-kdOP<_KV1)b;b~|9& zNKgRL#-SQ)DnE|44vx`q`JFZlaQL6cv1IM7JB5EE{f*K;3w=v_LHemTwI`+jA^IPd z{w6LI)AJw0-!iTk 
zL&8R<2o$XlC{zcE_8NTM`hr_YoH&XUy!djSj==7ap8CPEW+ck_IFaS3;YRJl#V94Q{`sB*-*e!54t&pn?>X>22fpXP z_Z;}11K)Gt|DPN%;nVKiikm!TBMNo5#Bd80haMr_5C`Q+AV*H@C8Kp{Jsd>ADenLj~%zxuxEoXpsJOUiDtg5IhHMP=SfWqetI zr;wm|b4yB#3KeHj&+v4Sm4Q%IQKZZ$n_uQxs8fU578WT(ORJQG$VO!G)_E141=dP0 za!0dQ%w13fd8xcYNf=&~kf2MEr>yig5Y0>qt;KUIixFF#kdRja;j#{`v{qHly{V|5 zGM*K`0)T$X(DV_DlroRC+z=!drR1ZyGSsM|q`+EIM){oQsjv=TpnyA~!owGI)%kSD z^ShDfe=D-L8`=A<$mQM0<=x0#q?LBVhLy5Z%D@Y7=!ibk3JSlGZkp`%eW#Fd-$=vKdTW&{HHEL}(Nz{j>ZxklzEbU6ifx+sL zc8A`dL=E$A^FrRb>M1Bq7+%qHbYXK;%w1@e$fzL!VyGe8L0^|3B=pp;T^Z9-5vdl? z5~wIDz`$vAql>KtRTUKgU>qB|2wcvSing%3Q?T2hg}lXc%S3O0WrXekkuC5nz;F*0 zQBqc9t@4(XmUwSdAnhEv=3Ak-tzvl0M`R^Zc*+Wj6hog^mKPP2%)8CH08&;`UTTP; z)eD7A@l*;tRizjcp&WZ_5sr+Knct2l`NTc^yYU`=5H8+rc%3+$Fbb~?Z!+E{ywBl% z6z_d_Yw&vU7UG?bw+U&U!}}=S`|#G__2MnWI~}hR?e2iv@B8EA7H%9Mc)N zgcFB(FmI;e!!no$^JN(6jPO zF<*mDct!pOy$my+c@dxa64$67^EK+uyqJ#Vu`J@VJi?iVk5MP$FfZa6b&kxFbTci> zW*J5t@eLZ_6=|72^J5y8Ng7xVap^LD)|dG)F9Vl(^I_i1lj)cj^B}$S8|5-@=Ebyp zh|BzlYt(^x8?+fbHRvInbTCiiF`judPv&j3BlBc_#zS1@VepITmh=+CH zLpt~{e?BaekHJ&Y#D{e=p6Z%Q9Ht*eUpWBCb^v$G#c?qBXBLFRPl5Qg^TT0YfU~wV z94?0Y-~#l0xaP8O_;I)?9`t9p#kZi}qo4;Xp%dV)tU_OhyLKUT0SJ5WHs}eso9=)P zfx8x8#$rHdCw3x?f@{Vm3l(k(+*xo_;g&O86AnMb_}XxI6WmU?hZ*k=hpniPx(eyw z7T*JWxGV1shaZHi;pog$aM$7}&3=aACZXbH=$=%#DR8fXn+i7%?n>yULbw{-8n}y~ zyB>wR7RNmI!rcLv0`cI(;qb+D9|51>F8U+Ng=@wg9yM@RK92gqeH`v4xDVnw9}3Zf zPlv5G zMR3=`UE356n;}>`HipBQaEo6D9SpaI!>w>rwm|>Foe8%SZt5rD@L;rSCa%!Rqx)Gn zTne`sZZ%v!_`E6})9u$NrbS7n9}b8&^R^?xxhG|1NjQ8VF`V)E`l={4yrb~0o(*15 z7!#9n5>qBx`Y$wBD`U^S@}iNp3jkG8@y^1l-W(3|=6S`Dn6x~~k(lC(o{(4`^=QAu z6hIt_W=DU1)>T&GJptSz;^JZYhw#pXOya7_-sxZHN3;zsl)yjMYPCqX`yafwOy zM2$;KxjTAXqBR&ZE-`ggtRpdfd7L9L(--efY%@haZR(ep=}b&_B&GrfxJWZjP=n10 z$RY)0UIn=y_8s#JeoKDE$Zv3YIQ$vw@*U_a>pQP%Ss%)0D)ier=rMFd@esx{gw$WJvgdHsbB+l@Tx zkcZG?g8yjixu`L+q@Ls0)QWg3{E9O%>H0*oXmeisv;%P30DMSeI$+xXiRgB^c76G>Sp>X(? 
zNEwTuA$nSLmrOE`$AQ23VaO0Oz{4?U9bh{E+e-jsBS_g;6%7gS#jtbzwqK$ZBaYPU zpyQ70Kt737KS!y_Ef6*Rh2;Rj{hQr6ZaM4F1 zdGc(xK1^r*fqw#*bvBc&h73fupBhVEpa;r_ZCHvl53bewA;N@J1NIzXAXPl%&*W;rZx`-;&qF#ByrrzRVT1Kbg~nyp#>F3E}V|YzJT$1I7!0_*n7o z1#Bi@uXEBO+AL>aT=WtV zXBuBr)E87_q+<@^nsyjAWMWb(U|!+_HjVf>U3TH5MC<&-l$=JGg&Tz)EJrl$-$$UHZ{gDfD#MLYiEE?AC)VNJ1aq?$?+(0ePz@U1{ZZEw3&Nb3nf;BvCN!3uTak8( z*@Ve6@GD_#_V1tXIq>h`!2WY(E|2IgWsOfrI%54ncqxc@jCG8d`(*eW=_svlNY4-T zFuMsKJzHgm9#5Ntj}ecHb36>Ua{R@^?#^cu#zsE6qwp`5@EK z!yI4ug!Qn&=lyzkoF0$sYy>~_>dHj_3%_l8d-X2*GF=XGboW}_ou|9E>h5ygyh5ygHRxa4Dt$tiW@lev zO@&5Hvu2FGWW*&I))DC=MrUMXj1a@IlnlK+ba~#LYGl;Y4PD!ft3ProqmtM8RKK3$$4(b+DB$!{iCqETS1ViLq%>O|)f=>&0o zaN2Sqi~H>Cv2f121V1U}_@4tgJth_(`VoqK4RfMFKSfMNN=*Dch#Fx|F`4g&7D_@V=-&=N}v_X6DGcpGNn($`v!=_UPQTGlK{QHXNe@Gfcz*+bibs>Jt zj5iC^VM|cNqPSNFqr4&b=A^{cB{Eh?I2ZjYu0C-C{N^F$3k#LFMiG;g@Fw$Y5-}+W zl)|`;BF36vVVY(UlbWy)1jn_AnDm6D$Up8i5tBIt+d<+siwJwd)hIvi4H2Oxj7G_E zZ;60A;V>!{*D3;e36C&liwI0l*Z>yAZ54r;3BwuKCIYh(4l%cP&!7m*NqC)|fFAR7{*d8H=5@%V-!oAIFES#P;qQahKObv^MUG;4Qu;j-B{I5}aU%Kf z%aO-hoWhyK4;x(>#HGS>ve**41a-?9FQU$Q58n6&0sXxJ733=CoJ~IdUPQfMWT^b$ zCS>!fNf42qO^PvFmX2b2=prbqe(WW$ntr?iP_`HFm456jubM^?HUTdVEGhl|B=E)) zMul2N7<*`)O#1}F4-;08v~{M@#M^Gg_JcP3mYPKSJxhk;5$mFGO%Dx&Ron0H zps3W8Mj2lQK|P1NJMoHo-4D;bERLYfJv{%H zvGah_qDcOJ&y)6fHnGbRmb~O7u;e7LNDxJoq+mi&P%t100tN(g;5@;A3A19(*)w2H zm~+M)PdT&adA{FY&GbCaBIo~|&!=Xqx~rtBHjJm;BkQXpk zjMd-&;+>8fNOuW3tNV4-0J=-CwYptLJ*K7w?bXGHa9rF4(*v8lxQ5f=njET_*jI%) z_i6Zt!Fh6d6bO@pcG9^w_Ur~!Y3$rDVh zmM4FRv}>yIALcJ=`NiC)CKe)B-7l=G&R8^};4s&fc8}6Ep;6jGlUNLM>4A|#g;JP> z4Fe+n@4Ca%P%DfbL!ZuMeAkut>AD7yPj?L3%BN3Z?n8k2bV+=$FhLFu_Ln@|4* z#}|P4G%znz@!+cjw&c^}y`Fo}(>Z;dSJMXm7J6W+Y5IWl1Wem_kWnJ!(?uZri;zzr z0l6Q@`!rSH(}}3A@6#?(u6#N-&UK|jqx3EK^r*<(dOjT+@rOR0gSPFX0gI7VSKg=p zu84fPedyC#2J0?>`ShfSJvbMc@j>a=Fq==O!!Z#spN3vI0#fHpw&c^Zyq?R^(>eXR zS92o##{%Zl^FYo9%%``4Tq{C8eH-LO5%Os_#-I*B-lvTs7bs4zKy`he4vKQ+)BECF zS2{jQ--1v77MWYmr@KV_p-v!#?a7=9?)F@Gh=9I#wh3!K$q?Rf6RcV#a(#g 
z*Rj|VF0GiX(&oB?wVLi_)Fi*Qkt?(U+8Jo67WuWGJfUwx+ze#YAis{4BdRp$0U135 z{@q58fccPn07Lg#wA2OvX{6O^a>wje6{oQsP9wh%RV3<8F{;;RTr_k>JJr^Pn z1f@MuUr|nlfu1$ASt78-au?`M!0krwUpGQFLsxqlR(52yJ_$&uEm z`O-Axv-oi8BJ|&0J%zprZ!K3 zJOUFW$?){(l-cK<57T}mt0 zb8GP!-Styxsp!Uy%g%T4H(2{pHQiae8=rQ1k%awmpAQn(O339viS|UkFs{UESEA|V7ky{J zkXW59(cZ`xHr^ywS2P~jktc0rcCa5TUzR?K&55z3KaHyOKw=WauOR;tu?IwJ^fv(# z`$23AGD5_m5C?+H7O@OsHOOfqPKLM|kBEyPUI5u3;u?sLK;9Q|dm-^4 z#4n`&pq!^6%IID>pyE}CL_HV+N5-nX*e|E(azgV5l=j2Ej=R2RS2uh#gtX{ zW0Yti*rF#x@!JZWjh$r)8>U2ou3+sF-f39qyxr|Z5_Y8~695aHw?``JT*ywpE4x@;rB0%-@3ZA2)L zR)8!Pp+LG3h^6D*54O|X)CMy!gbKO)vRo|1E-T*XsUd>h47^0ES{{&Bh@RZ)IxLGIKj zH!j|l-dmtF4$_jS$Am$;nr44VLFzi`#>|ccUBTL0TM?KaR0;9QKyiHyxe>4+ebu=9 z`1Wd_SuptV?bR{T;K#S2k=envB)(pH1vXn)c4xbyEns1}8^~l48sCb=NXy6NeZ2(hKlPG$kzc2ou`dU zp`-Eb>0y%blK$-_KM}z|DD3GNh6i8~E zl_C^Kv5W#q(S8X~Kal=G${m0O(wiVJ0?CO{{S-(`+vEeO7(KcaNNd9MfU7UcGea^vD%=@kV^ zy5a^uR+-_B@96e&m_u#qdlR`pMK%7XL(8{bBd)EBTIebBg; zLM{L-NFTKE?Mle=#HEMudK)O#utihw6=Q57isr9vTydWjKFp zfswibb-hPQkB79d+#6NX0Sn93Ag72>Sl$V8qX>ni8sc>k3d>Xt!?g&7WmkxffQ4lp z$krkhmOFz?5}~l13o=`T!tzLvB_b4-F`=+rMe5&_qp-XZ&)01KVljcWnqT);x- zb{pR=hFmQ!h0Yx|a(x2%x{?$+ci8yW8|&Kx7CLv<8{h7%H@@9fZ+yF}-uU)aREdD+ zwY#UZD>bk{IugZlj|I}5AlHjfAbkw-mIwt>HLfWKERe>6Y%M~8v9 zwfJ9`0%O5^eE?I_WLwA-_kH#@B;RZl6`a5lT< zMaXh3LuU^(RL|HsmfBv=);WXq)GSxb?b}*|O}S!i-#(VA<>kIM1eI%TxnI*+V$id6 zzi!5>g~EP=FITEo33EdY+SZyoK-*g7I{h)vN^5l+qwfK>vDNAk&;xD%s;ds}9Aw*D zuA6@L)7k9T`}%STbgr420+q(j{iV@lyP9k(9U$pe{urgV((#dQtqeUGo4PmE=?>TJ zR^*!7+i|^4GBwS&GQ*0J_$$IhQ9Idj&il5R7}B93n!n6(sv=f zOOzhJ`6zoW%C+XNk<&ItDRbg#%tWfGjyVK7H`M`$U}{+5iOflK1-Q%pu04%ak5%~6 zjy%KwtMF4G4*^!;e}a4gY_>mVFO8N*3hh)!=k$JFn-&xQLB_OIK{NnNTQ`t)BILd) zfNEgwy9{J4kayoNk#@PS52|rcl9x&0GMy88Jgjxef8+Dnu2hpOU2NXgQssRqf!>guK%9}}x9^)sqY4b!6mF}1qOSsX=q zBx2CR^~OkW^M~uV5s${rcOsr;6frfqx~5>=8)^J+#I2$8-w}6y=*&bnb-A3fjPh(}|9Ee~C#0~m{5GQbb1XMd&^3V7 zX+6k&Dmgy=aEce+gs|UTei@eMC8OQt&p|#Eq21*quKfeTc9%Orr~z$v`EZa!05*iG zwWQ)=zjjuPx{=>gEx|TA1D60w*fFi)sqccU4kQk@W5B9JnWRVSMzryK($%0j`wzI) 
zB|mAZ=Td|41aO1!wC{*)gYdL(gYdL(gYdL(magtnotkbhf$+3%U3g?Tdp6xX)vd`$ zQ)S18vpV~m>e0fqhsS;!WYdq8TUjSyQ}z}jA>G0lat|OlGP{&}rn+h`{$zC+IMck* z;lf*?D$}CT-@vzd715$urH6vIw{yV4ie{I;$*5dxCxMf85;$omfs=LQE=!Ki@C-^G zja!aP{+E;^l6Dd}`5P%qlPPkSBs&wqi<6zf4@+K#vPH>aQWhp(h5yjxA*39Vw3EO| zI|-b$lfX$k37oW(!2F7f=XG)#{QD=r0iTyV1O0Q8b`m&gCxMf9k~=49CxMf85;$om zfs@tN#JC75;&`PD!R0RmS4Fqx%E`7{|l0`TTdvLOHyX;L9pyM z+=?!%XrzQ@+(vcfmQsGoSX)oJbJIErT=b@~EpS<#1TK0jDQ9$0)!QE+{UcSEcgv@eP;6&`i|s6Mad;Lu ztFypGA0%tVziUOrpKX!3oP^Pz6T>c5tXD)UivJB!SA0Y{6Qp9jnye^3l{aQ9im%{a zSKN{NI5#FdES&9EaX<1Ei#bc%P8c2ib#BjsT8CWjvN|lB?KeW@bWnP$TdCQg?6ybn zC#@61*>R=9ojD`Boz*g%)`{Wl_C}PZHQ~#SH=-h~DPML6BO0Z3VmLd&h-&k5c1I&x znxC^f8BvqgJTg16QqJj+#+fcVDW}-$lGcgg?BvGF(NpVkGB78fW_4mXJ0JcIj!V!|q0;(oK}JN126h`Y|FZyQk%py6oYq#Ej(GDCi(9 ztwtbNot;&V1?g)fw|BL0SFzyWg)X~KQ!!;-PNip8N_ckiGvRBI%XO~o4sWiOThDO_ z=?R|_;<9?pn(I<^19U) z;=b>~S~1rZ_bV6XDi(2sxBQ==brt)%;sJ(Etl0Z7h=FCllRh(jCnXgRDpwe8wEkB- z*ocqQ`=h#ei23C6Fg~Qo;-e^}rNQXHHInPfaK4Y9^RTFRXi~y)i9_neK_1D(j?VtmUta^Lvu-ww*v$t}at~ zFe-BW(kkaRvDmE*HibUV$^DfV(MY+W#`K(+l*ooHUcNjbn{=owr6-Bq+_)cbS@zA0 z4gsg^rWSX(RopB8fv)1oW>eQ9^5Z(C@`l&y_fn-V1$FfpT&>QFm)w+??2}zD)VfQ| zhW(T%8(gIxxuxC9rTHY!8fp*odonzh4zv8L;(U1^l<%T*;sN3-=nT`5)w(SUbVcBR*Cy2tbev++R_>* zDQU_Uke*3g33nd7@}NZdsw_lHat6ZE7f5u5*a5^QTzw$sLhP-a;Sg&;&H#$GgSZLg zI-u*IX`x!3GmhrR<#$JEa-nNB1ItE7Dajb4@$6cXNGEfYa3EZdqVNGAp_5A=f@~CV zJjB-^UjmI+K{Q^=t#opvvI%Q93Rc}Uhhui;zv8V1nC#)B&XjL_&HuE^n-!h)uyD#7d-dag-gn zYiNTL66v%te7f{t4NX$-fTk--NQ(}nl$_j@7}+IM z&c+oA%M-b?N@$9+$!UUA1(3KDVid>-py+yt86eYvCb#jH?dr(NzVt*7%2$cC=^5{Zw@kr>cz*wzf20$i!TMa7_i>6GLolhBCK97z7@Xfz2FNV&jF?9 zN32mVR?N_ilf{nEb~U~ogWAm9kwa4qbZams8~`I;_C9SVa&m%hSM!p0Wo;%Msh8A- zzmzI}C}LexL&??`zd~RKJz@Sxx-R-b(eZ(vA?qh`tcI6mkuBG{Rq zNzv|I^t+{{Cbe>?!tw}Eq{()p-Yk}ZzAr%B0NYNmHGUDI{DGZ9OYf%snP6f_oCz@! 
z7Fwxjx$E0u8NXi&rsk5hqr-eP*wG00i1;P!%J++mF)Gt7Qmh%u5uq-&4Z=Y_GpZo- zoO+q#TxLY1Rnv`|qD&rEne7VFpA6HJOR6(2votDL0vq!JrMDHNE8z6R+i83y2ihY| zFNW1|!HOW&b~iGL_T|>TH$yK_v;blR$WReSKuiajC^#PCe*D<@1d!6?q)?CMU14C0 z?$?%3>JUax1tYy?DIJ3TiWR0?9RnM98!iN^d~)^Xu;p1PX)?HN6oK#mTwmkn#sC;BVbQiUC*qH z>Y9q{nvUz54(p2f>7Y^G=3;O26rC##?k`r>#tGcGv;(1w$zDUAm2%_#z4RQr0-M$X zEmZ9XdMWp5$VZjbOi9~F&1aAwDQPmbe6YEOfZrj11={UPY2kXibsyRhX!i$#&y>Fo z*9(w$Dyaj48?3c-=D!=z?s>R2w3hm#Ar}BH_aS3m|AAl^)NIF{ODj-a!CDQ_Ei}Yj z&`|@-Kl&5dfK}##kqU|80US&tX+Tch6ZV06ieK13D@6^fQ+Zfl)KDev0p~8j0R6tL z8B4!*8a=MCXgG@WML?6gc)XQwZ>4k7kKfjnB_1O2dUCG-63;?>1oA3S^d`hX1KCpn z7;nBDA4(n+{&D)ha|1OsxUO+HHQ1EhMF*ceU*m``xW4R83~D=zytW5&D_Y9!nt|-K z0n-lP5%?#ODY>>wNpE{Rx6(hNf-kPYYfM#_Y|1iX*tjt%da4YPFI(;qmKelX^j^Z< zygjzsqwaQ!ycMuV-9sP`0`{ou7D>~X5k?E!WrEMax4{d(4e};XIxu4O6V`D5H+<5W zO`aj)JXRBFz2$vx)7NPF)T=HY#Ig)9o5sX8d8}b1Z8o)luQ6bi84NNID4kZ{rpR59 zqe{J#^z_v0MRVrlFt_{1O_^s!ajGd2>$co@+A;AB8RJksTBZFPVo#81D(%LIJ!+i7 zR%!ASjk0@O1%2=E?p)$!60bq>N~wAe;sKC*MSKDACdjiQeuNk>m@x+^a+J6e$atW; zntK08Yrsm3bwRofY37o>;g|uKOAZ4$1ZYwylC&RDo}DrFlgT~avtIym4p4exeGi6D zD0@)2t~XBzTVOCHYFM`xS6=D;QH~xsVGf)%KD2|{nv)}jptk&_2whVHPe*m1vlT*R zypuS)orc`9`Cwc11as5hp(h6?HuC$=os`HGtd)NUFLo#PkVInj+`;qRi325*SUuBq ztVHWVEnTZL#w_fjF{Vm;UJHk45m3f=)?KiunfR+dBCZy76guQ^I5dgml-Rd)?qFBj zQV7(uP0_A48!|<^4%g0UsA#vAv7+6&#ENVx>pJyCXE3oWI8I6)LhHmfR>_4t#)66c zl-zlq3ntl~jmy2s@Yd))p_RO-NfwrpIgoegNdW%2L{dX4=&ee zHFR&+S^_r`E7t-;=goIrwR5x{;yz%j8IhcJHZ)>|KY7XUlb7rt&DHB{gy>X>&52np zl3L!F*@;PzAZjgJ29fq zJtTBS*GPMGxt4Oz%*Zq?A$0|7`!z#a9sDIse-5w?{xQf#V5|Kj0YCcI|M`wpq@~(u zLBuzSkO&peZ-xu?cnNmiS`U_BYGXZ8YSxd5IBH!>#peqVJvB_mq-1lf(B8_wvEVzv z_EvQ6q!p0R-pVwPNg}kjvJ_+iV0$Z9fm{G|{Uy@-1$L-I+AHiLesFYoFuAoWtsWbe zr)?Ip;|#aE`c-X|drEASc1mpYN)6?l76sVowIjrEz(%h#K~5K;(d!nFD*+q5egydh zu+eK+q&sl>R@CD3`^^#2LTvQbCAOVJH>nXYVg=NFE{ghZexbx2CJ>E)&=Ft9 zLhm9pI+3ny6*;0kJZ%7T`2di9v5*bdXb9Zr2gLOaU?FP zop?U{A3VNQ#gl1Wub5bmZ$$BQ`yV{M&BU|g|KRaWB%VY62hVc_Hm~^~JRcVD+?3~W znUcuKD&XaOhI|%R#y0#)bj?l0tbVYb3p&~r0&~T)Z!qW>C;W35s#U$}j 
zvXD7_1#8hk;W34B9aG@jWTA{5Qz+Ll1wK(W0hz&BG+Yc_z7&d2lkLu*h8J=x*J%Vk zO?H^@R&elDvXhm0uM#?OOMlW-Dw>FUh`l>Zsgz8MOd0iRW-kw$*~`Ob_VTcqy*z9d zzC7&GpJu4*a3P#I{o<`-nbf$Pwre^W9^}fI<#skak$aK!E`}#_+T`h4ek1Iu+)40s zE0;~#+y>IS8&5@U4ER=tSLTic?@^|r8rt`~dY0V)UXvS*Y%jxWU8#y!a*66x7Si{7 zx`8X#4<9qNNmZ?((!1E(&%XLbP0P8iU%3$1a*^v_J_xL?<-Tr!VG~>G&F8?faiq@7 z>BVRC-qXH77QOb&>9yw&Gxu|Tg@Yz3F;j{{TH;0rp=ny38NJ+<>#b*IX!1F%Dt#Q; zMLw^{HJlCJp)nP0)>O%*>WJt2{1Vg9z%@Lcl&(#vVVf4p1F-~Yj?p<9Xn!nwG>RF1C9hb>ZnFXO`@NGx&;`{TplfhRrtU<=h^}t&!*tf;<^dy zJfOSId+Zq4w7T&~3<6rsK>JQXl^mH|@`7u11oXt9OeH6m+?VREA5c#UaN~M>TT$AK(G(QWRP({`e2AtL6!o|kAye~ z9XeH$J__OkQr{KZNf7@5`3^|x*C~4+92OebH_Ly@-A!W#L&)eiiUm9njD{EqG6HaA zlKEFGht#E(W?fw+ZN;6Idendggcqq9~P9I$?zInP0oA@pA7`J zL#zYQC$oa}g~SOEZ<6}Fap-zCzofvh9te4y?_2y4W} zkqLpj3KO;oa#clWP$PZ{-#dUcV%IVBLckhv8OXtaHRAIicLCOjkE7$ifHmUSv5X6V zHR1}8MSwM8VqRG9z`dPeeUn?p-5T*eIPL|k5uXCFpIliZo*v71EZ>Otd-+!pdD+W< z2(l4yss53y8cC^LiT6u+v-bi;=_w`I6-T+2VTMcjBGrnQg<=J*SoBK1 zfHxyl&Gl@^nOu?$JMH=uM~9>Q+Y_{r&tIRSSj`1fzZYbe(4N+>FG*VV3YRIKYkqN( zj>AQQ{C>9}Yjh|Xnwa27VA0$t7Z)59p${=@I;cKloh$l#G^p_2^2`X`tgubd2r}z{ z(3XVL;hAMY?voNcuMuSCIEFC5Mv&7$P7t9HBqlV1+)3(<%Fzh&1<3m%G=enSj^Pui zKZ496WePA-Bgl7A={Axa8WDjThbfFCd&v}yBv-;EB%LvybZlF zVv~vLAI1{)KU-^YHjf7SFe)o>J#oPFAbl(vgJ;QEIi8alK=3BSxgcwR@M+a{|ImV@ z9psuF<#u2Qg0L7H3DFC%7+Xuo+=K#)v2#g13$Pfw z4dfaid|J&93*`}MG1gsWD#pHo;|suIBoWu1J+tObr zy>npm@6JOvT13MFhE^OcmmFL2+P8Ym_Xta)+0B z9^~0rW_&F3n#okF<|^kMSl;k5-+^q3W%h_ znH659IV?>8t4}RR&sb(vEc3I;-0o$D!!pFn>;f`1mbo#ONziDo=05i_vtZfF%Paty zAIm%#*Qd;6S`u{TgJrNB?q$vdSslxK6_-GJ{R#YcG@7iRFrysfMs)QxmfHc7 zc@AVlEVEB6^RCI<=Vji3_=PO+riQrFxv)!42Weej%AW*#pZM`GZvO@yv!aTyTvlM#4~KGGLM5iDw(1u_E5A?&Dx?j zW3>9c2n}a2_mba*=UTKI*?R(G%aZvdzA%#;m;GBf{swC`?c!Ovq@$jdZ&AVvKwHgY zmh{rYy?io{Y@kN-mL;P!C)fsZ5YSfBm!+-5wFvS+pxs1wc$GGD6H8o=cc@QS-eh*w zlXq>UHe5Mvg6fH_sRJ1sD$%k8Th|d@6F2k@jxfC@IULfN<3_k}5-h#LsGCFPw?7&>=h` z8&!P=L!uk<-PDVkT!bX24Dl%{$>k+Z< 
zle-3!tkM5LDR%+Z=x@am9%~&*j7*;<2{rn&a6bcBqkkStcx5o|BGf@zXRwt13=s}Wpjc;RQ3PxztQmG?Gg{ki;b+E!;se?ybN%Q7&1Z&7} z4VkX-FXHHU#UGuSZd#4l)Oe_+Ih@!%4kjh$Dy*EJX*8uI8zs% zZ_>s>RtLB^-E@{X>~vFDV0gMogF?tLIp&x=@J8wAt1Yk*V$QXKDLUAsVYrPQY%(?N z6@nZ05I=)K+=Z%X-hK-FVCQ}s+k&YU>>try9{?8Y{{;C8C{wV17pVwhtZ%53z+X=B z7Wluw{S)BIRBFdPLykm@JsF7*_>^zKpPEVnaYgnm>;>`ZDD!jqkc5S$8TV}-0_!Y2i znZrR25}|FGJ3+1mY+L3hkdFb|mRW`|e^4LWmKn1PD`UX6Wu|~k0Bl>vuYUt~G6Fy4 z9+nHVEpr$g3xRO!4rCeN8Yo0(%mm{KV$6@;zn7WpN?1?vvS)*wDOr~}BC2AzpR%*9 zpVj3S+cR+?EbX;JF)*`+)0Ebg?w+eZZ$6?}*U*fa+rgB2;_( zBVfZxK`mSAQL=U-W>~VHRxI3zf;QfW+Ej-Km8VaNIBf?J&$v|IC|4ujl~L{zUKNBJ z_ah5(9}RPp=(p?@F12Gp_J@(M&26;1S#&~_8`M_+EkbSJY_AQeRZ)sQv#%>yEBgZN zpr3$mmH@Vc{v61?BD90vX*$zn!1m*(f{YcRH+pA-oFYQ|@vnkB1=vPAvpIWHSG6m{ z5MXba>Sjl!+7E_#%QQ-bE2>7oG8C|=nhdg|2u0O?AhUqzEz_pJ!s=L3Rsa@ODdoR zMee{TR|Dz^^>V|F{iEUOT2S`o^>}#nr)EXDT8=zlFIW3|n%_@P1e5u*t1CJ_;w9Ku zMyLh*0DtCZfKEVk0LJ>`K=z-AbOvZwVz$)I01cowXtbDlNa&OC;UBk7?{~s8KtXu? zCkT)K1mVe_AUydKgdZg-3s3t5;r}r#&)T^HST0VZC0nGAkECxYr6F3h7)1dMZe}(x zC^4w$I*9BC5JR$tZU7xxY6Eq}LFD}dsFcJ&mzGRock|XXE>%Z@H+m2qsh7(tsdSi|dxey@>AChFwi04W=zndwl!SAa zlKZrBCre_QZnJb#VTrliTuLHE%1%8YOy4VMtY6{>y)&0YJ}nU=SFrY6a+_QiCI3NE zGW;J|#TVfmjWupnl*wxfyMnc!B9r+ihIHzsT0KH9)9zl$IT5ESgBpTj=cJ9)uK3^z z-u+3NZ`DqrGAx)&%$v4meT2S_rWb+C=M>Pq*URvI2l=Iveo)d-`Jv%%3^zc@lBmpH zo0XZg*qoZQ*qnqfWU2cQHz?Z9;%_I5wh727+SW&U-W0(#W_y&&Nd8CW5dM^gK~@@u zT<2_Q^sg{sbERQ8mWBaXvX~@}o`pGS9X;z324hLV(X-MpFpJhmJ`Brh;YC+*>sqpu zJBQEIeM_QqXY^Iq%)WNB1Z?)zGTgp(6WF(If{#+Hfr2{nmIb3T&K(ulw{C*}l5#{~ z-?|CxTQ@<9+$Dj1>n5;o-30cno4~$x6TAxlp}`@jKP0ek-30cno4~$x6WF(If>V+@ zn`7U)33xDZipRcn6P$q!a|8RoWSl&9jy|&OUYdyx}|8H!fc= zd}8Jj40_S?Bt>N*;cBY?y?7=KTw>I~>rrb^*)SYS5Ze0Zqi0JQ50*4~vx zm6n@_1pe%4u@rDA{VS!jeC{P- z?giwW12m{U0v4_dDPti_JOOx1&)18h^cNX(m9{+Kp+F8JbYdBfNF9OJTfNrHLajHW z@meeMbaGo0GgXTYTHr1ZQ8PTp2y~stMiuZN_qw9CUtcvH6MxDvgGJ3 zS+e;5v_uEMCwiUNggVbh-x+4fY2@DLEx9&t$@Qe4@1?E{rJh0JAF||=Em`vJ|7A(L zW=EMPUGwmd`_${dKGdI?folNQu5ZNIF5>J)&?NLPI&KI#E8%JYWXD8|%ge*oSx6g& 
z-vLZLP7TIMPCenKP_6|s%~V3_MRFH|IgBK(lNfh*=s z-8RfEnaL0iG&ne_ow|>DXVitV$3*FROepPW8&sBo+aU#)szB?pG?JA$KGfO~jctMU zHDo3hVGJ4a=pYgY0FC6j+auK}mH^@?-k!uA$*lv5cZN6>AfBgr`&FzXH> zICN#)@v!a~T6a3R#{p*DdXW17v+jM6H$}+0SGe$^2SXHjeCK-4a!e_OYW{((5YSdN09Q|N923}m~pSg z#-#|P$)RySllv85#&wxZ2L{ZzZ9ztgkmn8n*$Xh^^pAS2U2i7k5vy9fW_Swh1T%}g%(|RBGdRgfGA|-TO2j8Wp z$}hWy26BT&ufrIMn`-)VX{T`{XzQa1&_=VLOZzH!2xKkLRGaCSj*|%oKx(I=LQ}BI z?B7tWf&9CY^lOvL8y$h*U63~^Nw!^Kzq@`F@_C@qp9bi<_jHiz7#NDENrflHbTCmB>7^I`Qk^&ZvpegBJ)M{Tvn$*zq=8c6Pe8R zL=m+_;&(-pxV7qnr86q!e91T2 z$IY;jr1es_ms#dojfZ23Edex*Y_gOOUyI$vTT3}NFy2!dPbIX{iR;vlD@UNe`WOL z+pU!yuInrHlQO=1JBp;*ENZmxRH?q1uC}ufm0H!On>Sh@dHtF!-J;QP!aH%xe9}~c zjh^M!yHaamzWe%?Qr_p*M`eBwG_c!!V6M?Gx}D3d@*8e^_cceA8BlfcVDjn&$fxa`>LFZ;^(**J!(1C#g!r+H<=py|MGnI zRWe>s zCD_MzUqwtUj=uXUVtTRG#zn7K&K|{D9P`~*Sva#;t7E?V`a8tj;^@1tl3Cy?qwl`T z8SS++R%7wS*HNHVwl=P^wQ*IrHm;1m_*z4Dd##O){bTzUew8hbt88&x6)ukLqp)J( zo3PrX?e{rDvrARkH({&no3K?HfoiJuAgU^=?3=Jv_D$HT@SCvx<8Q(~hkP6RCaf^_ zscz=>9%yI9#Bx^|eG^vYv{rh%Td6hQgk6cli}igBz6mSbnWOk7tUQn{9zhb{gcVU* zJRJM@Caj2x;^>>OA{rG(--H!WZEe6eVMVmGHsG7EB5I1Y$mE-_a$kqyCM5Aq*xCH) zQXG8~b`{0dx<)dvkwUF9`X=nR;H}Rlv)#896rSDnGw@=4+k*AxRLe&BKiSkBjg@D6zU3iR*_ot7$LgLa zYB3-DtMaNLDJuC3YQ}=JDsn9jBmL|0mXnpR`gOJp{R3VlJ@KC~BPdU#u9umK)HA5*ppEj}u=3MU z^C9dFgfy6~+bC|hTwYmIdRmrNWeCoUYNO6xQf_ZXL7%pX+YwdW*@_Jt{DGg zbyQot4f!Ha9Z;*AEgtLU5fMOjOC_~YIj2D`18T#jczO$m$*!kw$h83 zwCR&24Sz0R%2I+cWjAALw>q@qG*tKVOr|NhWMj2-@vFlk{(Et)>oTZ-H?J&YDByD` z)%V$mIq8sjs9fxD@vo;6r#NBz&tbF7PWJPGt%Lz%5hvQwF#dOgDfN>{MQ|lG3hhax zbOM@d#OT!jYm%lxPEgWOBz3kC<7CLAmGqdBdcoxGguD@`siLGVRw^&N>B3zbY8b9N zh{-hB71x*YU@f6@FRFDza;o5a^O*z#*+sphl+U3)1yXuv*Zx=;s_QO9hvaT3#_V=v zdMj;{8^|=^J2SE~}EpMkOU;jBvMWh!xln zl@)*$*b=0<2o=}~qyu0DdNrw7O{9hQgK>eb;$dpj56C;B4;pHLyd$=T8UmEcys5Dx zJQkamdl-+Jjbq^+4VaA+L3R`&8+QYl4w#KzL!v-yXk~$o=9JxIr+6$b)e77fm9tfb zT44dmd=V<}2#}?K73hUi&SP72%Ck6RJ58SA=r|D##{+q%tb+PGU{0AAJH=zMdAZk7 zW;UJ=_ql-Ccm>F1B4p!@AlCzCqt}pd9@~PA=9C4oQ#=-zY6ael%DYvDobo8h!y;7R 
zb08Z4E6@w4oX56MpsRR;ddw&1OSJ;aQCSLDfz=>QM5w@aAT@v$=!H|x zV_PWDReaxo?NJ~fDYaM1Y#E9I_YP6hnzd*oYo6B@n&@|FEJ z)GvTl_Kdi)3o2CEEAUjlvZ+H*2bewOAf-ysW6#TFV(foa)^nD)BUM6Pd2@7Cd*vNK z+HJ19?5~u&iWAXS!V*E1t+Qu+EgE_Ps%)4t1Zogqm3<vHGeDH}UR!?6uu z5w|1A4k9#mOb6Kouu6ORg!9-IDs6++XHiKB=P^H6)meeFQMtFu(BLp1@^yHRK|Yg16E)YkVYa@U=2uXzzX!j zsaVYx3Un3s;gs!J&E=i46&kt$d8Z73>IaxpMny(toX2AGa&O>Wv#}2D;egq=J;*o_ zvT-WNWWa3n8nUsjE!b#I**+>Y;XD?XY6Z?jNRIFwT z1-goFiTHs{3!QQ-8jc3?PB|NDHDKe&$WuZ+yHNAu&_)v&Dt%${38ZU)RA zFW1+3Yzy|-6T;^Yat%~MUiky)y3Z?r66EpCl@IwV<*s5V`gW);tn6!OcnQc?_6w*_ z0E>Waqspe7$Ktlgy@l7TLH+~xzX5BI%tHJGm;)+6$^mnL*N};IZNUK+0Xs&eCY;CO zQmw$2sBEq>6ak$;I*3pN)PnQ`tUxcEiq&kPKvyvdrwndZ=#=4T7y{&-G6`w|U=gq+ zGOw|lWf7nc+U6T+Z`fu6X3v2j`vYc=muu=gwgr1E0(^c;cVPVpSchf& z`mdC`iY4gl)V#2=tI+T_AYa)lp)LiivS&t>ZRC!%%07vw@|C?4w%Y-#?0S#~0IRH* zYvMfizbfnVTe#EeS9Sxsp7zRL19@e0<*oioxl8>thxf_R3n9J>%HqC9^#m#(qzM(b zh~XcFF11quTgokCx!a@q7_rSD<8Ii_1p>W+-1{)+wgE!b8)MZCSw;0~akW=nx|nB$ zu}QUOOS!#_?H#ZBaoE-Yrg{pmPy6Z>YU#h&^7vb{-YJSPV$7}~x$!L#Nuu}z*xv_& za)_@$z5sH~Abte-9>{D@MZK%yUi&O7#ic;5fLdP^=ea!Kwnn)l-m;RTU^`qPW#mWy|VeHvgc-UnnBU@rFZnOHs&|C5WajV<+<=cIlE zWVY&f5Q+~_Npi6#LKl0nwDVYjf+b$T5`+))3XTC;4!AP8*o$S|H~1>=(v@(WBCfDQ zJrx`JtGV=PR43J;EZWb8@k}wNI+Emh+4p7#VMwk`Yk2#KcO(F%#V20cQavMeFF z2ZIa*%*9?lu97yn@8uO705T5<`?43yx>|gd@5_h5u~1xLU#>bX z)SGZRJ~t${2G#k#yd1`5Vz$2Qd5fL?Jl^<|%* zag$7Gn|Am^gUMOwIs>r2d=bc6puQoMPH*Ig5 zRYx`t$$iuwI}~WQq4yTR0_}c~e*hL}Pk=lISfF_gnOIjrpmmcn1=@?SKMz=-y$$jv zU{-tiY%CwUZ?orer?~w*<~a$p15}a%?GqG#q>>b9o(KcYi=~~%3KU%L6?}{EKfQuq zL4F3pzU;-av4So4<>V2R4@7-=P;B^q)`kN*k}VgP!BZk$O?{ezGzQGYZ9&=q=3+0O zavm#i@o629my5f@-W4zx_Xp_hyeAyHi!1EQd&Y(?Gnej->U>|` z55_rSw!Z9ni`~h_dzX0QzI+IL2Lt9Q&syp{rV@2+-j{uT#+_+Ozf#@Qmybf%5rFmO z6G4s#>Kjt&E;5Zc!R5uW?sj~Y53aA^_(EJR zbvd5U~dIjJ9P!=449=}J`>Bw zmTorAo*rB3G0#amy{wYdPJL0_TP3NTJQ22&7fU;j6)0$=dP~7Dgok4)Ihz-p`pbf~eDDS5hy*?9XGm#0Of5v=AzrCxEs2HX6J47RQ=T*J zN=@k*D79f~Il7hsHcXuYauN^@Q(mURH8q*f>KSq-e5(O-t7mQamxdJfshn$PO1JJ_ 
z=(3AZc%fJ7`&7waxvW%5Ux?Z&6=Ox_1-b7^3JBC`uS4%OfOXzGKyLE|dGSmv9!a~x z&Z~l6kAtFzS)bS{QvMLS9`wqe0eQ+R_adoScuUGPGWhz|@DB!LdH)``#zqEB3tmUz ztALFNAAr0MG}VrXmuu%nA;)OF3agSD1+4df1>YAyz4-4COU2gYdw)|cjJ?)gZb!7z z&tNSqaoP zq|&W2nKSAcat?fF1N984`b$F;TwP*MTwqE+sikZM*X8KC6tLjB0pvQsg6l4jI{*tV zFQ1Av6$IDTQl>uo5bO^E7F^GOJO!AgUOp4c$1cnFRoiIn8C&Wx&q;7CQ%UN6ucG*6 zm89VEL>OFNEbTm2px^4GImh_b7>_=1HgtlFQ4K^@HW&P*BcqRu>~y60CQtUkoJJN(aU6< z$9&y3@1s7yHeNEzquk z{VKo$?KY5G0JGZ5vt*5|ip2l40$LikpT|5Wf!0H2D^Bi5@jq0O0?iX)pn0*h^H_m` z`Ch>j2tVc(ya@6<5I%3cSTpfnMH}fs_E|;=Gi{BCG%8;#IMw9`l^I zc$G?$i<_dju}YGQJrTOti=~~%3KYEP6|_aTjaSeeq$?2iWiOU><@hQ;B=(1+ueid# zd{b;_GjnN&0Whm8Sr-}s<1jH>U-rDku7mL&F5b8=ZwKGDfO*QZmO78AL|vQrWuKpM zy-ev1D7C&k6#28b$^B~Gr z|6Pc#LjVh|Wgtfa7F;KRoB&vGdHGbVDPtA3{(C(ZT5zp~{dB;B>jIGT0JGG~XJYwC z{7=F4aBQi^JSV~RPnD$Lx)Q~gt0V=NC&J+JVrl2G0tG!)Zz;G5;TybydqD02!r<~^ zS+_I3$_LjYa6BX~m%1F!`SX?)ZjQP1T2$v(#?Qj|3}ChQtPNu!Z&KkRNbU4lT>Ay4 z^xeTIRXbVxyotis0c)R+Kt2HM;qB#8ZUu5oum?#nUZ?yMzOMkY#`9+4(qe1!b01r$ zc+N_<60P`ujb*$#zo)r7mnc&@8g za9av(vw6X(zbsfg6u#h`yU&!mp%fesLp@N~-7EF1C9zOwGnL(J+V45j?g>-c52ZE? 
z4Mx{Mz=okwAR~cr81ga|?j@5srJf-Z;2RH^TRm&Tzci$<&*a>@ru022jd%TaMd38B z)O(l1uTfjXwaf4N*$zd!C_m*qmfxY+c55o90_LE1AHa4f4hA{M7vzOF&-cFzvL4?x z%1^k@&H5|6@*~l8xL1Ax$Z=k|7va3!UnoBu1HXy#({4irN4avpqC9yr8Nu1yR-^EA zuhg>^#X_-bzUovOZHSgi>2HzJM2ux3*1Fto!wL(y0OjZT0=!r%7TZDrDHYJ*#4soA zUN@_6s;Bfy6khI?de)*?XbVcs>Xwnxg!5S5>WbloR^Np38+-wtITed-p#ZbGHn#dh zvwBWFrT3ulF0a(H7CDb?L8)0iEVkNXd8_rGSM!be2+AMw1$gF^^Vk*&FsmoUR{zti zZZV=z>9Z(&#w+!#Mb2YeQmO~eyjbZkrgXYBdjSrqT7nc@Wv)Kd3b2f72Xyv)_nzY6boFCEnJrF){JahS<)FRlF zde{T!5BPrv?1AHn@PXqMRQ^dp{Rd8N1sh`kC$1Fp`@{|Hv3x_{R9Dz0RR~oA<^|83 ziWQ*Hu0+1q+RocyaRJTnLR~Jg4d$y&TCHF)4%lR%Ge}3k+NL*1FTf^iUOweK<_%J1jhm53ReQ>$AB7{{05;jpFVNHM}?29CB zmyiV_*-UQ2QUncdeXSK->Qcn5R;}RLRt1-5)qip8v$j&LwYE>xw)VZcwzakO|NZ@D z=A3hHLcl)l>wC}SbIzSJ^PAuN=C{pn=A1J}{Xx;b4{BM_o&oq%I9SnMMB>Ldm};+< zNm=?YQPK9fRGXB6grW%|n-uLeAbu4GE1J1bie{=w%cOh=N(_SEpz>P=!TU(Oi$ggJ zQ!V5o=y_KESJe0m)l!dVKI#%~QXad1TMAN%-AU(RQ4R;QI|GR{9L(+kNaW#Qb{h;q z%cOklobvRf7Z%5)*9ISx%A#o25RzQQnadA1iS=jr%^HHA^@L_--z`+Xm109XXz6+&*rW!V<*Qk65CR4u& z!7HfzvO(|$60hS>3TUc@Tm(HU;O|l6Kd6>k#S34g+6h<}u?!z6LsP`!j{x&W9L!?T zg#->}aViqYIGDwze$X;0Mu*MmTOKea^4WiFI*nQGE9DIbCl41&i| z`7wjwIV7IJp_I#13t2w_uRP`YDIi`XC@Xj~ICnr3nr^+uENul#y%mXH0q`{(+ z=~gl%C9dL~>h}J(jP&tE=p)JLKY;6P94x0lBJm*(mXo~$>teWIIhp!cF4CBMa_UdW z$O$B)N*D*rDH{oT_LV91YMGQvX@Z>AxRjcdfrOk+qn1ccgMoMuwM23<7fMd1nzT&H zhhVosFdCId83dD%n1Dmc$y5tjAI8b47!cD5Dqc>x*nOF7u-{_8oaO_t6i1w#@_LmM z8TKV^?~@s65g|=jsme_Ex*M@1NHw}lad`9{vA$6x1Q6Qve zAGtRMG}*0W7@X*)45YNiGSdD_ATjFA2HyUz$4U)=6Ba1$8_^wgc`Q z9C3tX_DYD~roz@7M*0LHbyxYm0)+IST%4?udt(`|^4+Lq6|_t$Ni$+rc-&HSjvBZM zc(264t9&;ialL8CRC3(vE*rh&MD_@`xk^Ew+M5;r7lV8saNS{$-;cz14RTW@Xqi+` z$jO5nZ5m=#GV+I)fi>hA*dsg+gpc81kMJB4&*0$Y7E^DiRfBp61^Q7%@&auA^dQuW zIO2FZ+-=RJ##8n4P1L7=eS?>D4kRUAQr_(7 z%d0>KDcYsLdkGF!v}=&K3I{9N%}CscgB8tS2wEoPQ?%znEi2kSfZu_G743c`zKesY z_G+0F^N!f(*)7kGK5?lwDFX>b%OJ8z(H;ZhN2w)JG;^U8%~X??N%;^gHwd0V<);jS z7m@fe4rR-xTF6BJnebn{Ex(2uuTm{#%NZ9dpn!FNWyqf>BNqP#Fm(TqS^Pc{@8Vz< z{}qY9;9wS;`a#R2d@P=H3hI+BCtZN$6dcUr3?$NUFpIrfCZ*`<&f;8`Qj;=}U~!CE 
zA{Gw-;yh}JSZpp7i%m6YnUoK~cMXCOs65OdC`4i$4rR-xTFCl;;FV{~Qvq=jK`C3F z;u88JW@%`(-I}1k~A(7q)VOyT7IT5r>%CqHFRiJ|u zZ6ok@;9x~N4~gwKSkb&q8DMDcbHz)F(x|65yBPU`4wgiR*AM)utBq zu+a6Hcf`H~TP}0uXHo_diuNkCM2dDh5Z_8Ik)oLkrD&#_v`or};5h0xA-E5f?=c7- zM&bYtWy_{o$VC8|@L#+w{{S_fq*}_BPjdGHyq63SCDua2ebGM zBwokCEH?GA7h880e?oKOB|G>d4{s@Ub;$RkgwMhq7f;Eo6lvKu*nZGx>lROi;>}&vgln z0BlzHCqV6u%VPjInt<7s4QwClIEEd48em;pJ`r$}a4@F~Xo_W0)FS;Q+OjEktkI10 z3?OA&o&{VpaIh`YekluZ#1oQXP3AiH#u2g-aLaMT5fbT@5W2@^Ha0oln#o8%A*2_h zK+xTAMrJ@xaDll?)T@O1X_vcfEoWrQ&p<~=Q==g43>>VfbT@Dv4%XCmB+kacnri9? zEt7I|Q-~mMKOOZ+Q_lf7-Hl>Ry$Fd5a4@A_Et7I7O>hR+xRjcdfrO_1mRcg!*#*Rx z;$Rgr7fMr2HEEfY4*`9Sl@WXcm9H@fZb9N^97owJPDodEIf{bh5m0yJcomY{xT9j#lb>1^@Em4Il4gzeb1SwPeOkk;J?DbLjMmW z-p0Y*_G+1wp*L{^c*LdDqzoj4{u;GJLjNNWe~5#HZZ4G2O*Ltmln=oW>Nh(a1dX2< z1j)GI1!CT)!Bh)b=fw#<8&DCBHRFVyYkiejx*MoHp$`V&ARKW*&+Ap_vz_A#=v2$|}yyrKd}f_zDgdnyKWt)pc#lziv#d7^xe=2e_GPS)-9(16)@b zM{0;T&5 zwWfhZZZ(%$uK_l7Bb^j$9#d=8cB9sS^zBW6e24ejny~sw;u0xB4 zR|79ak)kAH1b4#9E=S|<3?~eP|= zl@|X6fGC;Hjc3!IV^?HS0lmYi$h;!cbR8YKB2#z$477er20_vusEV{(G7Kcxc}s?d zycakxOC!hwfE*i*0?pUck}1Q=9*V*=E~KsT6`HiPL4Oi_0W@i8WA(*cx|psnN~}P7 zTAIEnrHkYA#XPzgr!VG%c`n!jx?nJCocS{IxD=2Pz%k=}i6vPa*8S)ieNN}f9BeFE zk^~LgWg~J#$sdqo{@FsS*CZ&afwd@g+GabRHmO z0s44i(i#-vgR)4U9KHw1kZc0bR8FVMoyecY>3;G(l^K<$ggISZj+hF zUmGTg+%0KC(3hZE4fszqc@?Fgmh^#{P%*HDk|KLh3))TJ5>FZp10C6~ z$^mg}kX@S@#B=>$CG!dL=b3}ABV$`|7BbEW^5>a@{CVc!-;lE4xBCz`s7opJxs}fpmS4*j5+36Li)FPeY3}LF)F}Ab*}Y$e(8pZU7C@ z;1cAY8C;B7XIM$!N40+R6{Vz;k$QD1a{KZ}l=?me0_?sUQG~60&j#M`uS712`;A1J zp%1Sk=pvlb`_|zc>8rk>lyX1X51(d7)&S)tC=etNF&w$#ZDa{*#TvC54fMGyka`0_ zkZxHi?~s_E4y3HX#r>wDl|n;A_(ppGq4^Ug=wD=iveJ8I$FGcMr zBFOkXa=4n6vMvQE_t`nGp>P?YB)`(*iTeO(^x=j;UqZW{;K}z`DMQfJWPJaa&}^q{ zqTD+EXV9;de{sG}IZOzZTcdABulp`Y&p@RUPD=yA4+5#*C73lRwNlO{rmVM_DKAhq zDKKLkXr-J-h0Ut)x2C0m)}VU<6Mly|CLwiZxxzPwKJwJ(mHsw< zSxLm4Oe#s)LiYAjfV)CQA@-t35aH_d7wXxd*MKZ}HD&E@fQ^>`9tVHm`;NXOkKtc~7UJN~ zZ|~)-7FcVWQ5;T%BLcR1@@Ty|2{IJEd( 
zZt(yv@4=DRV118a2Eq^jj$nR>mhMj>*1nIdS8BC0s;z&^CYF!70*kgNaaN?m=dxq`>y^0ugCjsU?C6gqc|4?G+pB`R zuHNR@^!?eF)0IeR;AE{0WgW<_-OtK-X3 zX(*(>mB1aiP+w5vkHrH99@J$-|F5~sNB=c0{jd7K(!vSd-0gz>JLE)vgl$S9*bOe& zUwmL^B!Y==DjbN-pd0i@*v?M`yV=Dy#|L(Y0o$oX@GTc?wGZrtM6lakuy zf|qSqN=9JYRdUzwkZ%P}^EE>SCg`-o;hU=SDNy{OFaOv5R67(Y zzNz!uA&+--K6#eZSe^%zx>a9LN#HB`f&w&p!5_WT;)O6EUMp~w4}7tL2#dMjR0yrW z0bg;GDPHOpKkdVRX^;31`4G^jhMB*=be;DbJ_OJAgdp;+4*^#WeOnWUpdEdrzeyO5 zG7t&)x@S*-wFE-vT7iw4wG=u|P(VufP$_)jE^x6fCyiRF%EQ8OyGZjq`lI4arZ__v z1BEgpD{zMo?m7dP@4>--F;@A2=yR*wp@|-lGH)w&uNE%_3#qA^1GhmJ1I0de{LNsf zbop84D;@-g#|=E}4re#}ii=F~1EzN5E?+U%{I%=#c@|2ZpM$OK*7&xLW-L*r+GUUd ze~kCc{yzkpCU7`+=JEF+&0pd@b23GH{2ksi=Ws5?uV*eJVE%?~lV`{>LVIqB1*sjf{ex&DPzE+n)R9ZR*9}8U26}(WH-b z@ja0CPgOBO(&NMxs&UFj4P;sUzXsywoU7qSy2O#vWXu31a|Tnad>D=Xf*RhfA!*(< z3ouaLu-K;?hPB`$>#MBDw=~pEfTD&bprI?!(8zNyQ!RYtHbg#ia!nJ$_mSLEXg7** z1g_Me)WzGmC4?rMzNIRXCvOMPjT(rs95g`73?Bmfe8q2>;%2w_L0_?hfrGi2eygS} z@T9MpZdGvcEVuYazTy&7yxuLQc?tayX*H&Jr(67vE~f5XWQrfq#pG2cSpjjmLP0Qh z7??l#V2Z3jjt}NX24?(isv_{<Mci(G z<{s({@W@M^nlBUS8;Jgf5}7+_vY+Glbkj)Y@?dHnYwiW;lE07n;C{%@{h==L1V-J? 
ze<`p!KaF}mEUrN;g9L5ScSdq@ey}VuE#H9%^ZOFq2qOrVAe<95J`f!mWf1s9dprrU z%g!Sajt2E!m#`Y;=Ku?6NWM)?jESJ>;My3Ev~RLmS_FeDLq&Zs<_FECvr4Or1}Mf{)kI3SsOdA3hPfgN{tfwn-Y)?qo_*;zTA%DrQo~6x&uH zC*?y1g{XFNJdKK!sqvX*ks+*R)0h>>(=`vL>q}Svi?|2cZnQZT#{nr5ozX{G0q|sC z{G1st-{dN-w6hApP(?fSOwlmAz?un$HTNZLo#R~)+!0-ZvHhU3Z<%P{j^{Jj6KL8) z@zxr4jkJ$3hh#&#jI&425wLb?j?jXp_6upA#ib{zdxpz=FxE4{OlBnfmgauMXf(4Q zKEQ}JTlq5AShqfb!#C1 zN_%xaLMu1}*XfZP9O8O3Ol`?c3^gPuZBvRg%x~??Wm59t%D;>-W=5`_%uL(qNWWSs zr5fc|QY->`HDwP@ZZv5NHFQEUw0Df#C#3>fdB^%($T;eZb|(d%nHHL6$fEN5dZHfh zY5?+6jenS6I6#>gx!|}bdRg9BHYphnbyY4AIGMZ3a?4L)8&a<7MANcijo<%HNw!t( zlMHs(z)yDjY7``Sb$%+fG!^pmyd=>^&O(o~`6(pQ=>{af$Ty1ORt$!2z{63f#EkAP zZ>Dl_vpml?n>^nfZbDlZc?H~4G_J3NO%$gyjTIn{hB3c^EoAcpQHxmJO(CejN+OMdcv^v2%-l_;(H%2P zB~Hv2Jgi8@yiN)hrecsCB zO;vo6@!6;PEzV?~=D7=M`E+eBls?0A>zMd?7ski4$|w&IFd?9DRWr^EgPv!XtYE5| zcKDdIxqb$*pBHwR7?2|fm=*_t2`y;fm~32XigCcHE+ESh@rq_;-}vz)ttU7Hh2GeZ zp~kCB<fNyJ(lfKIawGI zZcrHqTA`WF?eBLbbnRu|?xK1-1XkDJmI_Jh-B{W!@~p))N$X9REn^CoD&~ zSz?`uwFAbi{I=q;(b$Mo{B;zEq~~~&TtW`&)%ZM`h38@F|jTlT6I;t=&E5Sa=Ibo`92KF37o7N7@%e&LkPmKY2 z{)ji9*rn#T_+%f|Q5kBya)Wfg00x@TV32YTq-Q+I(2nUiL%pWOjm!C3^KFO`tv@s0 z)eg-RrO`vZ;9?lb9O1dC?J)?Uy6b|;RrytGWOqFurIh=4Pu)k8Q#rv?;V~u*9P68d zumOyNf469=ofw-H8p{}OEJFp{t^=7!4rG#Flsr>Kl-(_loFBVaTB?!Mn@SDq_piVV z?;7uTd|DlF(P{)74hj_n1l>Z{dsU6 zL(5$*&#=~&(1aW0c+n40CaUs(jIjml=zTq_Oj9k_MI(xF>Fqltb!_9nrGvxK2PdhZJolz$k&bTc9B z)(6`5MRW8T%%FUWmRHPL7n`S#n+YZarOY=0+fbj%s&yna&CO3zt6GeOggxA~IMDCS z9!JFW3QJu^u}2xj=H)G3+cMtZgt+cuQtO7rWBsOr*v3=WY59FPJ}BsJpoN}gj#sv- zR;oM$<<%TIVRzulWBo#e?6<-G-grRkAAUIvP~@q|yey(-{R7FV zV?^_ZC#JW@5siBBWw0lyY5A$Fy+b_hU6q?*(*kS0r@cd!_73xzB#$D){oXM(c?0x! 
zgeM}{9F0e#T!L78l)9R=C9Rbzb2QQAsVb4~%|ckud|}ua7cE%U?3X*;%@Dl&Jmz5*OJALJpO)?@Z4xY=A&NA5-bV%n^%8>9z<)Lz1 zij4)j;CwESi=91ArL$0`5LBU@?T81qAC@vAQ$LU=Sb-pVgL7afC1Wzx`GXuo*PM$C z`&s0PE&F?bo$mbEv3H`)4`e2jQl+Rv&V{ONM|AEK>2{W!%H^|=+t+1TwrH)Ck*nl* z(OOOn3$@Cu(@}TfXUds!VvQXtAHuvjq>g7?vVV2KZ^P%A1*IcY#qoBQ__uQqkRf2}qSg!DxW)_F_V_|Jgk==6 zZ3Q(M2zPxkTjjCDY+Z@bEP2@@MzciFc-_B?C~_fFG8q>-UCs@T?TA0wc3K`{7z8bj zxNJKxQjC#BOr%KGb^>EEImAIMCI=!%IHB|6%1@>8P%*Jo1H2wrUdiPZM=W2#<%Ozl zhOP?1Mr1$4F_l0bn3n0QeTM=mm*FZew@xNk=+@f}ihrhZ=Ld!=nJou^Qz2rmc+09( z`Qq!_56aX9GG%-$Fe(#9lw)5jUbObygk15^b{iT~Q(1)2sHhSdXIECrB@WM&vMAz) zk8v=mT8gJX=`UMK&W_2Yl)WY)dyhPUvRh3yMeNcEMa0jwF5}8K`4FA9PlbD)bzo-J zg{cQ-W}oZS1IntVWtVI#ijT#8^RfbP>22FaC*O2?_R>Fl&ptLi`|?qG_IvbfjIU=u z`DgFhxdAP+3y#vW_tCS_&RdRs5c*e)Bu&x9^XC+0Aua9<*qCCxa1Qjpo1YDp(;9%d z4Is)jZ})jLz$bXLgJgQ4op3Dgdk*BY_iEHi42>M<$+K1dAMk9%fG%n0#>%yi+qJzQ z&d0rN*_D-j#>(^c^INII1Ng{Z}Ub&x#rAf`o)#hI9v z^(EWOyUALn%!Q0Y7@mBxZtg|a2mdZvKlO#E{{f;-bw&N<9z`vZi)4uwc8M|Uv%V0v z3a$UCg4d(lb%rbK3wjoIT9KTngzZq2`m7ZgPE*0}ZKq*YJYl`ZbGI-Sw`neitVgz+ zEHTlfOkv`$IO}K-i8T_h#q$3uhD3~?_FD8m5(O7COO`qpNr81@@$yzoE+dHC#Vx|d zBvFc2wifM`eZ;K-LKuMM;IZ(wfl4``NTrHopDH;J5HUHhNEWC<`zg5)DN!cv9pwcG zkHk;6z9&bLpBCT3jv4qRNPAC!?))6ajW@a-v<;Jo9&ZOFncG3{0K(i7qT4~|;^13C zeSd?L&8NRm9^Dthv;jAL!gQjWL%#b$C3KsqA5HySH|6Uk(Gp$IrW17}EJBbKq-k zt#?(eOLeVx<7-KIH^X1(!hfXTFVgTI#lxFc8oAa-Uow2YFvzt1`Adqn3xjdAp%uvR z{m`Cz%vT)IR&YQLff$iID^zqq4no=%L-)%Q?4wB z*FCYy(!n}wi@^s&d-jmht{sB)w&Q_vI{EtRw-;f9GVv>{m_mK_V6cVSmVv((^qPwI z(}I{djPhJkR9oh=>gr1@4mp8RAe;<@K(Kpzi08k?hfQ|GzY`{)xE}YdVJ%d9dWH6N z=NUN0jqQ}sX&@s!;9Q*Z)K$w&)iPa`si{=NIgCLq$ch`cAEq+w+8DGkHE_f)fqe|$ z8hxr;3ycNt+!AOQrpoRov_}qnPeu;KpF`+|@zpN<@K_))u~+{?a|d?G2yP}*-%UqD zuNz7IL;J*H(W>SXuDEOaei#^}n1QvPAY01QmK}N+Id#g#VyXE>_&D6thvfT1H$`ni z0qb?XEhi|=JwU3Tr5&?Cepn2pyPFShrv-ruAV_rH$PPuEy<`F+6^rtTTFb97rBu9wTi)5b2Dw6%D%FqHl=aF9^#}vw3 zdZy@1@-ynOo>b?2c-lcW++6|O)41ufpjhUM)N(wI(Ow{P%Rtzyth_8a1EFXxB8&;B zDysL=aL`Ge=|k@A41P){pOp1qfN30v0-8DHrasC3IK&B~iBXjjy8;B^mq$+od@M`m 
zBPN+KPGg5YuPs0pb0h`bQrId$kcTc+oVf?R&6eq_5Fwq49`vIgaJzu)o&F3*oJzI1 zg{*p0uUO6yDb($lzuSg3%=0G56CLqKt4IbSNMXG;RG}<(ry2k#%(+(%2N!X3i5^72 z7!rwO)6zX2n!?RGeJ73L0$TO0;tVxlT!}hG$}GF|aB@Hn##3Q5uf#J>gGD;-xeZ`F z6EStKs!L6OoWm*~>Y&w-!FW6=l0)$fjI%wPpTEgQlP99vd6lvsf$l`nGPDv|U5Qj7 zhW&^iJ*;)3m=Og|rnT~L4kfJbL2A4R9b&ac-@;HVe2 zk+lcZy)TEMkfwMdFfv09D?W(K4rFG?jAA)#I=WZfr3gt^U|=?LNsXF;)v6VJ1zg-{ zxf}(Q0p|`rmuoWFsX99)cC^Cu@*_M$h+ht!&cy94)GekACnhsP>m;@bMmj@g7VGTk zGUGCjd0kY@2$?mhiZ$o+P#zEqbmxzaEV9e9cVhONif4jYx7!hNfz$Mq(MU8DpcyJp zrYGYeJ49MaAI(E)H|y)0)bmQv<(yoZ2bBoX&F6a!v?&#R_v$Q44M!U^eRrsUG)tW@KyT7bcK6x zG{ec-M;ED>x9BUV*hzeq1JgP6J@MV`?}_KFdJ(QaYr|R4w-lNyU9f$~Su#r;S5J2q zU$HO`Q_>A>mP<|o-J!y4ck-O#g!WS#Pi(~;00z+x=4M59I5x(9wqANlPtB|AR0fIh zXrS$1fPTS&&y@pgOg#Jff5PS|;bT0Jbap~x8`?X>GK;>2{!*~FO(J5zP zOc-%cTg8ERtH?qqFG5{xT#szs>+|)+-mrj8Gxgx!#C zZ`0C#fL8U@NS^HIrpXIyv zEadd5pT7`!P4omnsF0#-?Qj_cQP%*3J6#Wi@OB5Oh-gD-ES>vkMu9&HqBZ;gN7M(# z*@5wRundqB95En7k?;hH_0%a;zzRAWqKJ@&E3g?fCS;2__4WQdDl6&@yr>)ad)>g3 z9WjHOuT)KRuS0DQ7PD!nVS{mLLi>36RiCgrq7Zax5|hOYx|@^i9HC7G9Wx{vI+Dv( zb3O)ekph3O8#q>Si8=yzsdI4=by$@rG6*d=SmaSxPceQXiL0a~vw%fP30r(8aqw2 zb#F#@?~Q2ypZW;U_dak6br&Jlx!8P1ljV-4Rh2?=4`lX66$}SQcdyMwM*uHmCo-C} zb#xDA1ep|_CwtWMn>BsZ^GcKxX#sgP8~lOT^oj=Q9?M%GBH*oY2R7x(p-$kWOqmht;)O5a z)S1!KcKU@1@O0!Hx_$M_)AbPk_BOH)9SLE49*)tNX8n=Cu!!>>Ex6O#CwE))L9f>_ zJ2n6UFZ;2|acDwITeQ%Ihsal(2Jt{No}b51z6*EcGZ6UI9vxvj))&Y6pnZ(M$wy>RyjDZUE?|y|D9BTob|cMkXFkl}?^Ypke{t zYe)s2oERQl!-se%zH~MIN}dChQ13&b{-~%j`rM7=K~&6hVT$r_+wc z9#W0byQ*|EEf2q`*U7vpnOZdcAZBu?9+5eH9n3P)MUx#mXEyjU2n*YecyC)5?q$<6 z8b(cWufRo2FKgb6U{9~Rb09xC0u{odGmPFAI_|{|5RcL{j>~c3*RWl@~IN#|}}+)ov^Y^$%3!^F>^B$)Hu z49$SICDy!x3G@Iw7)2y%xjbVOUi>>|W0f)m&xFA|9@71W%f)Q-;5GyYq*+)Qe`4FA zy;!KnmA&JQpFZVdz|L*B=kDvIw0F<~@-NURjAsw-Ib9A7d5<)t7&m*=n)=UJFNRZB zXkkAzkxgVsy}Ad%FQ5zuiM+NRMo*!dkXed>zmg4uUz$0D>X(bTc0@9RV0o#n$a*7N z-z+ATHk^9IlweF2fZdv7KE6C5Uz416J|Dxu<*N9A*&UTxf{PIy`uu z;@lwRG^gq*63qxanTIEhLi-DFUjo|HLQ9NxQN8Iab@(u}w6%y_b6r6_P2Y=bJWX<= 
zdUfub98v^DdWhEErFi!o1Tl^{KR}lQ#GlVW|8dWS^1g-_A}Ia{5?fcJwQu4(@PQx` zE-BKH4ux=%LO7Kav~fX@fDkK5@xeAblBHKQ=q0^A4jw^_>?c2$C0h5B{}e-U$AZ^C zpi@vla%QScL9hL#iL3Bp6FealU1z_=_lE?vuUn!N5#n#1@_5=3bV4+75j)9c@53#p zs=au)%2@}u)*tse9Pyvq5X3Mcdlex?xUtA@>|G?&zN^}H#J}Qe-Ow56BbyV*k+iPA*%8VQB}=Bq(bDk5Sb`<1rjN;zX^jemfs%n;Z_oph z)B*7hOgEGnC`1%_5x;H%GD5jZ7D=a?Y-v0Lr*XOD501?qZJ_?Ih^ax2B267sY z;Gft>@T(*8WtqGknKP@PbeheRSt<9>4gR1b)&&Y}k%#A1KiIY>L37q2nu6X0hCxk_ zqB$IPB`QqXbuv;$I>SDU9($qfO|#7d;_vthTp?Y9+fAu!6Xu;@-H*9H4sB$U~ zv!)~5jU$-#_yF9Xr|R7Zrgx+x*DlMBzG=C>6u&(_mLD{a<(nOvc&T1aqs4eW(X;mfe%O94QbNwgoPN#O zdQP7QpJ~FwTWILS zcCk#F4{}at5(mX(f4uJ8#qr`(a9R{@g`EqqJpmG_bpNL?0>})Rf-P4rrNEn>lA^eK z5Zo1}K-u&|Zf=o&Yr2?XQ0x}dPqYky!etc1=pEbJw<8W`hSIZ4qpy+KxE;}l__Bi^ z(2?}Oa%cuWzlBr<;u&rw@^v{{G}8Ptvj8dVoe&locxEisxk|k`fO&rznvcLWE}Slp z$9@gdWFAtO$PUB(_HlApQQ>NNQl%USc@@c%*2!swGPO!h14yAfX{?-vD#iM;1Te+2 zVmh_}Mz6%=a@o(BCUYp_3u6t5p3cr*B=hI1N!~1(HP7V{9uQ20lF$=^10i%!h0YOG zSuRyG6jd{Npb8|7m(viBfRb_Yq;WEJ29v~>xh%K}>|y3nHCB<}JT3cyKxZnNmCn`7 zGV<$_ab1A*H>RZ2M+vkO{2@xp&74)L%J#Ie7ITMYih~%Bv$EkxgAJ0odY#a8ohWLb=z6%PH8N&nwd%w`z z35>4Wn{Lj5VP6)AqRSsO-l}Rt)AnPpd#p8L1#Bak zy`ilpB$xwWJiyIa9~*I;T?boJ!UC6bS97Lbn~ngIVH2o})*y&@mV*h4!#x z=p5{YZHSNt#Jkvo0YlAY+sm=L*hbO)* zKtoi&Wbh=u3_CP2OQA^;Min|!q$gC8V-mOLB1CF{A}&lKHYHJE5^)4u_K=15z>#8? 
zBSO4y@obEGAa){d_Ok_ZQ>O$`5_&$1{7vdOWH4*6q(IEMQ85RMNrp?+i~%ZYG=VV` zPSTTNX8n8>TdGX@W|bK(M7G4^8`z!* z>Ol7`XdI?rQV;s22XpC{7SHJyHtoy5oYz}vP*qD~>WR=Pz#0&L-G;RldQ>Ms-3c6$ zoUNZ${Zv`$;^)|20lo%%3XY(aecYZJ6e=pfbMQ6ndIrIZ(^~DIDmj>*XGKK9WS*+Y zdPMz#jG%K7on0XX=s_Jp+~K*ORN|UW*PdJI?$x=BIp~m-h4N;Ut57>?#>dUX`?R35 z8&CmfA%48N4vW)tC_F57jwLPzZm`iNzS=6{Qg6hKK}6k_1RNr`J83cG9*wjRar3A&g=noqlSC~Vk`f%EL&%ayvy)g` zm|2Rh0I^JbDsGX4J{4ukVidFDDNAx#V$5Jo&x48dC~>mP;-$x(Pb7psXJ8~2VzZ{2 zNxcY6=`G7d`3bK<-%4a?2+_5NkZFagXCD-d*U1DU@avw3Ot6zvR>A7dOmR3?p-27u z;W=vZUlM^C{ot7;`k}#42G($zT8&E+(K2v47t)hZrji0w2so_($7bSoOB4JLfr=Dv zCPc5|sa8~y;eAQcFbS{DTX7De_3H?~0+|uQ!)~X*Su8|ANn3^@LfEvQ{azRoD%wMr zL&t26HwQVmA&7VzlaUn6PSP8wn2d@eN4$lkHtW3}-0Iqx&krt&>aD6xYEH-0xzTMQO!D9EAk zfI?Q>x~NZ@RlRl)w>FQlF$L*iN2-b% z)2E=o!wg@?;|RC|P9pmLP%Wa5vEU9_6J!4-@!W|(cqJY_LkaSQAq2%yRk}ZchZpU7 z_aS{eaCSK=eD*CMs_H7h>Uoy93U?ZYino@=5*wwy{KxY;_=Ccdeu17{FbeKxZ)NAx zJE^SjFDzsxa1^L9+*GX)bOS=LD0WRWRFNU-8h_r0Ha~kNB=-2TZ{dF)n<3;eq}*dZ zSB8)IeD#{ux;{azb(=k@HHwa?*8Z|?8tbYxpgyHqqsHf?*6969qj7_CsJQJE_!4bK zXzz1!CAzs`;@fB{OC0)Ay?9Jg^M0qIoA+_w(De^u{h=?}G0^1>m74!h^+}}flax<- zoIaMoj_dN}?6}B#zctRSH3?71;0aE4fIY&()1&9RwvlUnX>`4DV!hULwFpo~i@G8I z+2f8pVL#$1pA5*`J%<@etOdsp~d<%I0zpMMpGsf7y{u9Z*I{ z-GF)?!BFFKGIi22+BKUs&R9B{I|fVtRKir_*#972L!XzBQZG5GQvX-Nvn9vG>A$0e zUihus>`Av#bVS|umvvKH*E#_8DRmn)J}2E~2kH)EensD}T?^X&EEOG|p2yAJ2QuBL zkN<|6e-QH%2Qn=IDEn&C_|?4(PQHO`_0a}0%qn%q&cH;8Zz-LjW~NU*Yq?X?UeUN} z1t_CdM;_NcYasBVk{EJ^@1!S&dWhGPahJ?9JTepK*$FUlXsf9Ked@h|>SJg?!4O8~ zHfl^af~bZd4pE6i>jZVwtUmSLY~3+5t6&JD*Q^O%>d-E?$n7>zyZY37yA8+Cu7V+q z)B>N*QcqL|%u9X9$2&fFLd+7u>uu*t{9|`NGcng5ai!%}z0wk)`=EoUwNXl+cbk0TXG zcB@Lm$Mcx0YZM^`3-Ti*2>(elAYQwvx^IOD6dY3rftexWn&{hm@9^4jPrXb36uraK z(!>e0>U+~#T^>vCK*2HgPC>@C)35j5jq15~cnav4WjE06-6-ml|8y+90|m#}I|Uin z#Ly%3PDdy1q~);_+$rPmPtmoJZr9GHuIUpm+Uz{?+8EsGr=L-r?TBmiqF67eKijBF zw@IV^TM27oD7wn5iS<_Yw;vgwkhmrW49ae7(G95Q83k&bd#7F#`)qmO3Sak_*2F&3 z7ThfrbOWC$OEtIDM8ynWhFR3or{gaa8PAr}>1c}lcW9f->AC+6`mEdRiTNlxqCWe} 
zy0OxYkpT56^%*rjCw-2eMVLVp?R`!z%o2QKZy01L(m$Xu@KFEXnF;y zdxaV*9zM!iSOs1)GHYQy!X9BQtk-(35&_C6Q8(nD8xT70zpK-WD{5B-0cBM1$fMt94FrN)iAh`4sppjp zm&C0eiQU#LTp*80gma7He&WhX0(6g4;RF~)W=>zC#UOggn$NwGO%T1%O<}yd)*^xz z0G5{{C`ssX?vXmG=Pt6SddDU`w_eSpCUm#ZYK$AB^Db`qI&(fsx2`*;Zhgb; z);Ab0&b?EtD;yILX#NH^b~WeS+`=B*lj{fdbk{It_$tQsE46rl*vRIAgPwG^#dY+c z8MaGekf+sC)bv6bJk4YP2uIxeJv=(ikBk9OJu>Y>br^tgm^ymJgndq^*)K3TU-0P* zEeQW#(*lY_Jh$R*)k9x-!%hMl0w*Z%_)zz^IoKo7K34(p6>?epT8DY1quo3zt6%Bp zp?jZ+4KH`z5tQa0^!Mj{=Kk9uj z_>fOw@TkUJb=dbX&JUkg_fYlo`u{>cNk6ZTkT0jQ3*W86%WtjpZXCV*_NIlM1Ncid zp#!udV^%-)u3P4ncr?2XpR&P*jv`~H+I&y#wjU5Jw7G~#+nGh%>tX-bkmyveF$>YM zQy#|wTn6vbhA+L3!4utMD`fwLQcjWiqGOL7ihWDkf#Edl7l5q*#>@T-WcCaxw_sD@ zErfb0e?mv@!M<7ab|w^D?cye~0~7Jm+nwr?Hr85$m=K@I>qqbFSMi1eVQiyvJgCmr zpDQ{^PMIXJ9r>tYIeVNORUsn_0J{*=6N)b@cmse`?6F82iexwsW50`4hvAfM)v|*v zOPmIoMs>#Xi`>U8ka_E|rGUtw&x>I{zcKXb>pa?4BkU}Zsr?u#?A(NXII)vc$^tq0 zTsgE{Vo!~1kz9!lfB=aU?fjD&iqU5wM`A0My>bTily{-)+hrI#5_v+@@h&tkSfM@6 ze%gLdoKlHp2j+4%-XG6-6D`ltjNm<<zCXdu1>|CtAl+dmFm8$z0jz~G^?8mp% zfP*${lh}+C`$L4T58dTtW4FPaRZxWsZp51zp{g$Rod=OikJF~nb`7Gl%jt4DB|f~g z`2STYrCTP4BO8S~keyMy=#(8hE}<*vHIQc^6FYts%TS2|r|l4SWTPY4Gsv`#-DSjN ze636@taqwHGo9=`AcS@YyWliu512hbOtLE=x6L6iYey5KNQmU#{w7tCk3IGO+n;aSYq-P7TfEcYyV5YN~SzcpSP>zuNPl3>ULq z(Pz;#h&&CLAs&3Xx=La*(MqSX68wRF;}zWu)rz>CJ}H3>4ESYSa#Q9{IV;}x=yGUb zz1ji^)+?Q_)BDLG{&2?|67SC=>vH|eTU+9Kks$)=RJRkG4IZ+vsYze47wV94jtpP0 zK=$2!-nl6Fg@v7a#98>pmN*TY$chp5yfxtIMQQ^gvi*t(zYv`*ehAofXFq!*?j$x9 zbVB*GD@0})u-~&CdXz%ui#Z?+W4-fvNV&777p6U4f zF?O85SIXd7+L111_mcr<*bl`SaBy735f5TuNp;qtC3-vga{msbX-&`t7l3SroS`%B(!V0S*V&0}TF=Kz?z9svG%yvP z90bxk@cC$Zw;kH1w>EOlKj3uX-Eiimpq&Is0KKu2z90-%X6{Dwvso6$iOu!0UlXn? 
z%sX>`aM2_Pf(EB4_?R9xySjp8K~8xNwjWmN4Ov1YBn8qek-dl1r-?S?id15QKfIPM z{t6Nmu@7tyjPb}#XlFe7orRATKI=6?+xCcb?C=|}!Pze-=J<3v)p;@knP$-TU72O% zR>kj0+D~I)X2%Y(2|PXDDUccT5!b%kcYGO6dIterYtxFb?Ht0VT(LPQHyy zJLh}G1TT<|k&6^j83Xn)gunM0Lk>EX`5-t}j1d@8pe7DO1Pj?#`KyOOG6;3V#)C5F zX*uXQ0E=&JKj`2K;^Yd*9*bpoHB5qJ42BGa_J1E+0V9bqoRSQ=i=WG~`5{0kFtF^S zX#8RD4tpbW&3mxnWQK_BSCzL^q6kgw-)`TJ?8hvfUqC<&xL~vw8?@N6tm_RLBcHeJ zN5nnb_CNN)L9n+v}|M>c*JW+Sbq<;}v`i%VB9Yvz@% zHh6rrEm{T28=Bi=)y*}gv%2xN>W!wt%CbeJj09a>xX_x-e2dmv)GYT|Wi(id;Andc zGF)f)TdNt&+-r)~w6uGy_c3(o>XLYdYVmqqSTuWal@;?Nh(oR6OG@W1DJfe#Kf17V z@%)twqVr~#xg^Inw}LgAT9>a7;R>n%1S_<9OqvM&Xsm9oYqwgf+aS2Q#)kGLqYZ>+ zVqzy*20o_E?zubjmY0@BwFKtQsfaFKMI2R{?DBabGD_)}s~rN%F-(ZF z8e>E(W381&IHm&0S{1n^(dgQahQ?S!GwiLYrggKm77k%U)b#0;(&a12a2KyyG^cbq z%Y!Ii7h9hQF=4zfr)*x8XO@~h2GxW~y59z#)GpC=XrIx8oV4m_gMp4{^ks|ZE?K_3 zbS~smwY1bC6O7iZ-C&A+E=rlXt_B^UwpCtD3w++DSd^TDa#B+!Qwyq1u)DpXrP*3L zWwK9XrD&Ixou%H~(L`k~|NR{I(owbo1q5Sif`YsLVo8PN%DQHKb(^K+Ph!_3uk@)D z`IOlDwz}%tb|Yl60yvb0ScCG>?RD##>fq0{*rMV`&xRWpM9cPDDr zlACO3ZtaM<3hvW?2CMeFTO&oyUB$9e^5k>jL0ue*_&Ap^S8?4iO*vjN|zl^AC zYuQ{~sm3|``HNQ#nS1ioArnq4m{>4j$i(pzPnd7j#sihSO zTr0Q}CrqqIYHd@4Ix~{Gc3ia9G&XfKx=@{7a_vSpxAtr|SySsKI~6*R0rU<2wmMb6 zeANh+8eLc4OeA!wX3zFRG;@iI+4DegsQ>qMwOtgo(yIEQXes%>sz zVr!W#YnxPdO=E3cQ%j5(ThmzI*a6dNr5ZJj8`tvBmb$hUN^D~6KvdU!c1;s?5bf2_ zPy4#|wFrbLxv9MkKLElAi$r_%dg3Uq8#fZG8@DJyHnnuFrE>h%HndHdOf9K2^}A_f zbwiAZM7>t%ZhZrFvUNiTo!jbb)>AjAEVi+&fhljRZEYZm+G^XH)o*(p{60gq#dJ^G z5iqfNtNiwL)I|Gw5`O#o_SgoM9W#uHwQp*yjkQsBXEkd9nk33vHxM}+*EY8_GM12GTpCqM^2q z)6j!Xr4fXXRjj_%$O+e4-&7$%`Wo!q)C^ximZq`SK+S75Ro8015w#w&*0#F(x>(J6 zk`K_eRipE|9s!a=ZmiQX(^LV#kkZ~nriyxCH_Sk%n8S5sZHzHS`F z?8TE&C9$Bfp;?zs0m-IxqQBaN@e`>xv5w|C*L^`vmZ0D5vD)IAspByNXl$g2p`~qe zlp_Y$!F%KI(%H*rFDhM$ut7U#9r}0!WE5S*IMvb&cdcR6U>3#bQ=O1?7Nlgg>hc)k zlY^03niilUbIMjm6WY9*Z_ppkXAo6ruH!#j_VGLAc>)$@0<_D==epRZ^Fj z=1a=vm#$c82x#euAwtt3pjExLy``}Oaht2HMix9{x7^rAYj|5-V|A>d(+6*eL)4_` zG8q&XsT}5CeM1}iqb*+tn;f`7Qbdrp<{GjUqX%w;s_g}HAHDt%?SM^fsM~BZX((*##L|L6 
zM#By=+kua?yS8Cn9ayZRc5nt-4>NMXFoEvOP#HAcfXsly z^q{e>K1SCdQCl68-cnyr-1UfVh>dssz19`u{;OlW4rMOc)}ruUm?FDXc)Y-Dhg>Xq zPpp-YFCf)~onznthiD#V<`;!-etS~eqSg~5#8n9ebqcU5VHZ+Au6dbN^ zYiTlRJtx9X0~YVdwZdxPx=AY$!{VoN!j$#~E85hAktYh_G{GTkiq^F@0Fx^1urL8Y zY-E7g>2-ipr~&@d{J*lws)sLzvMW5KC>n*%SPylCX9d(e zq9}sd(^?03%z-sgv@{ltHZ(Oc|Kdoal}FW4ZdI3|)#~;QCu4X(EP&dY=j49kiKp?) za8i?sHVx~dohR2qH1yM9F_#p6!ZR@*kr7-cluk2#9ti{@u5Ia93q2;$bD*)YA+|mW zmReC>DuAD`qQnnqAtI(|*%HJ^5Gf7YwV3d74uG(>2@Qxgw71kuoJ2+tYw2j@LY|4J zVp>LlWTRMngD%uJR>O9pnAmQB(W}|B#RDhnaQ>H=D(0n~HI7-`tl2(2Q@!$4~|=oHyj zTm{e%Xw^lsFTP}A)s^Wnv600cjWsj@ig~MvXBH#GQfyVr*A zC!HQ?#CSAQ4fYc$m;k9jrXK3rXm#+aH{1v=n9dlS28+;&wqPYf8v+(cqAkshn;FNv zGAwVKl|?sKby6*6p%yfti8Vqi21|3IrPhO^?X7SrSnB{$(0oX&v7<>j$aa4jHP4G; zTG+5sk}1@Q*$1~)h|XeBzo5n{?A+WQt7|epb+sEN7S5qj7QojV zssk@ku$~eK4S6Qo>|P?FDP50ALt7oLnS(HIhM@&?1q_OiC;|%H5UHC%q!w}wXeW|t z#L@%TxCTrf0j-B78erOSsbC15TS3=b3r10<3W{){Yid}RBxMOLIvW#mfT~F({2;CN zt5%veRF7-JC|^5{Kva8e9nB;SVy>TAg-jfn)7DWFi;@ZfUMn?Hr1to^;so{Ffx3ro z&cVBvzOd9@6?|do8%ym|f^RInXIb!Od)4*9dzL-74EfJ3JG{(3#X#d%Ral7%cQ3Qg z4BowrE3BjnhnCqZgNLZXncXTZLxnq+*=GdrR27!#3d>Z5GrCn+jtYC1*{28hstU_> zh2^S({xr2B=+&imW$;yy9@w=|(NW1zxODLc=)@!Bq>BvIHYo)4Y z@L=iBQHvgJv4U^)fS5CV1PH+WuJmn16S%^CRGF904{Wu9m(NGxarJu*k57N)e z<-eexx69uy7n_ra8#^mV$b{ZbKv@pH5FmaGnC2RFd*{qsW%>3OP2>Ew&UN2FS zdA;N+@GE##>5ZlFnxbl~M2*){bG8@o&KtLYCuI`ie3=+?_rVWp~|rOv3RTaV6EQoj)?RWo?o%!g1* zN&N>sAP!Qdj{uV5vooJjS>TE*^=D_kKZ~UP{wx$ONBbz$QfK%|m30KEqxMXa`a^S{ znG2~uGxzzq5b3YxzDYlSocl5TTvBpb34U%axs84vEO~@}o+){zgr)v^$vY)+Qum=O z0bd9ZKL*Sv^>)c>4-`+-E>#%KpmSZa5g4+H%{Jnv-`__`^&z7v4c0p#P&Ka$?n<)FHSbGMKQ$!BT+<0 z^o|j|W6_%d(YujN@6B{D-~WGRo_nu^aPsB5{Lpje%sFSyoT<;unR%xDXqHC7D&bO} z_M<&NW~uM_F(Nx4eMFj47y3>b^@LKN_M;utNR9c)te>FNXZ@t|Cn(Z2KiSB~fu9`Z z?Me$U}C^0{$*%zAFtb5iQ-o~ssXgxh+Q$md`e65?T#8LL^Uy68d?%KSpn zz&6O{z&3;0=rSMFW-$t(Is)$*>@s6s7kUDX27I#p>;lmi4+1+ zLYs=P_pLVhnHA~+*jseni?j@}fght6JXvfjTdJnz{gyEE{gz{kV3)B)6-C<26-66? 
zh?x2I_tU`&4_Yp6Wt+Kot9}$$QMASuqKIOjNL-{(1YhqW(6$498Xwb(mh-V3RCUeV zyH(#-V&=ZBHn#eUX5QFpODo&VTUu4Of|;vZ^(u;(d1I?X&{rL5b-&eLXXg80|69#` zwRuSkn7O3IoR+Z5oR)K2YBSGmxgUs#nftc*UOJd-K#PqnY%>pSF@oaew%lb4QADx9 z5*O)&z&ErdXxjn5gpZ{yxAL(SRCUcfw8iiiV&>s3_P6+pX5Qc8Pz&44hg#IMfSGGr z3~3oL^Zpj+pszaDqNL?tXXcWYLt6d~W>z)L&ozgM&ow{aJTe$)Y)2d0QP+HpZEs`S z8*Fv+{gkl3`GIH&g2h~1V{Hk7#e`d9Z3$J)w@|{C=3ApB2)3h*wIv9)y^S>z!VOaZ zRh+vg7xMPxuF1nVwyw)sTHInccd-cDB%7X>!t zZ_iiViyaked;YF`6yC1fHyRb+>6B4;JpV;A(+% zZz3f!SWB?hV0{(E+oJnkr1ea`#2Q!+)=~6=m%*A-C&luENm+czHA_#UUe+koTi{e> z;cV9E)y-J3siQ{jY2reCQG1#owu6o}b3kEuL8D%%|9!8r@JJzR&e*22n__KXcGG!H zF~M8gbQ2#3n;zria?=}p^lsL_86KmWRq!#p+3aSn5AnXYy4g0?y~z7sl*neV9trWV z$*8yeQ>BgWH$v^d->5np*{{x?n4|l?i8*I;fIpjaIS2U5IX5+)eBtf`%!3-E$ObjO zm+k1H?rCPc1_*OUFkj69{;Fm+9Wy0p#`a*GnByv&{gGy94~Cl(t|yfaI+a)KRM6{4 zDjjqxuh^+{kin*oik*bnRaW_t&ECndcM^7qF=}?IPryq?z|yb)EXqjwFBt)dF#@nU zBORRlQQPX+_g=P=HLF_oC4)^KZPBnDQ3ze?5Yc+~vTIxKvbJ7rX<^HS?FDJC)R7it zAt9q(D6R4%Q4HDmq_L|EnPe;hsRcma%XXGoz+TS5GlE1TAr6g%MIm(PH3ap11jTAE ztf;(UI=(Cj66-k;w3)Omj*2;!TieqKN`wiI)8un-ee2+#YjRaq-A|ipB^y3+*@W(SwWuao9 z$|@r}DtuYPl?{OqDIFoDPa}l-XerV4UijOCe`P}{IbmoIhLT1M*PC1io!l#Sa;W)| z+$%b{4m!D4?Bwb+09)x!B1I~?Q5#Kwas(xMbP`DtHmcWY3%0D6j4V_KBMYrT%K}Pd zy<}t|Y-H7H7LYVLqjpK0%F-o4IrWrDxV{Q&bSa22qHK>6jIbSkgdsnls-TE$MnEKSEKiPk0TorGOtjGA>C zl@{=l5rFn$1fWTYfR~H_#25j!T9R5($G&IM>alOQ^?*FuqLKpbh(d@o8cjs&ok^>0 zy&c+mc1zKOVatW>1!<86qKTp`6t)Yc*{xzl>djPX6^xCviWN|Stzr-~47Um*F~AUK z4={qlw3H~*CWIAjM_BE)BS_SBLE2-`X-6-kG>F8FhGP z^8IA6qi)}aARt6aM@YHF&@C+`x{3>bd+<+9kwOv%>;iz2;d+znpp$#WP7ak^l6yrb z*Fh(T*GrvTow{x--ASZKoi=Ktwo{IvM2}7)Ny0`}ooa8(ddbK_^)|9l@wF_VL>Bfy zU<9cgMpmsB07)f@wM$|rO^J;`%BiPJ!mUAAqf0@I5oLRnV5C{t)U^4|WU;wc6k|!) 
z_}8@Y?G|G5gbgn#M41~YM4F0C9c5sh0>+ZCS($>}YDy9}B#L1e8)-Eq(2uR=e)6@H zaH|m{+JhkN_CO@Y65{N3BdC`(%8VsiUYe0-#u7oIDT%zuurV-sNiy2al4J<(jL5!d z1X36IN9{?xpNQD|Ocvt>tNM#5UZh^9an%I3#i0>xiyI$;nDL?OAzhHyLrW8YUz)Hw z0r=Gk8xjOxtG26};PE(4)8e*<&~~$M$8rBu7{+2g`qtd zHY7aOrgSQpBrug%>{QUAgsFp0IU%FHAT81sG*Og=gp78fG`j(ej@uU}4S=zc1~3{bumM~TtxgCx03tDL z5N8h?f_h$~?5Px4(M*KZZYClRtLuWqj-#G*krE9+koH(IKD0Q53cfgmwCsAXPQ;bO z16dLeA0_2`(Z0aWX&fdN5X-3J7>1=$ROyMn;)3Leq;TGbuRs?t9< z1{Hp8%%WIS>_xF($ATRd{_EHQK_Em*M@ZSv(0(l?y50+ad+-kkO34WW_7*|;SiQHC z>!6c+#ZC@2Uy^%8C)YtI_llidod#e_=_FF5q8qi*1Sm&PqDLo@Bw?d^owi`hddbK_ zoj0=38ni5+L>6{dVFal$Mpmt60ZA9?l*A&M5@QMF)KezmCL*lSr69(LvOP*L(pGG0 z+I&&0*jy`$W)n94E^U0f+1NZ`!%GTL=0*yU_G42=8Ca))u_SC(reHUv3;roFVi?9o zn$iXTqF6SiuVZ%w!%az$Xe5HP8wrsZONg__5<$JJQD!XB^3sevGnNPv?Mvj@?Msko zUxJJbFjoU(W6_w##v--Oh`fY{vNc7m9DRqedwgk>%{2P@cjuo|MY5r?QCiC=o!QvG?40R>-W$|bWeQ0>az1cD?hqi2G>v**qoPXbjti9SfSv(nHB+tTV>S4s z1fuN!$l257`fuza_jE0HMOzuT#}}rTyH>euJFjxBcfr!@UAJ8kQ!jUIXS&;6S6pIk zkrG*$mSC-g=__Dw@!xtESz-;W7p9}=1z&3ySu9?o4!B|Q1Mb;g%r<6wkNLF4kNHOX zfgkOk;0Jz!f0keHks_#3XWdZ$toxYHRW|)2B@7+GFw4(y9YQR^3L*L-A#865AAg|` zxmbR6_wiv+>EoMEg)j)r_Z=fbxI2J*svn`LTCc|k0g-~$Mo5t!-pIiqKskaENu5NJ zgpI7n2LX|V1zC}W9^c5qAfROdC9+;JvJf`1FbLp>juwVp>qWV)^&(fVfNwP(CJwO| z-7`Fho#8?3i#n$->MTZj7gDRLHiz4wR1UY9{XHNel0bXVO=^o?aFV9OE}zgze+YT%L7Vbtpl|sz zBif=0OlkWS9(GE5Y!x50fkw6TP!dP!i0iE5v`lrZ&0YFw2)Xf}=}qaKwRA%zsOL2b zN^_T_|1%}bkr+Vop^*!IK<}9aKR{#$9cgw>guatTJ;8gX|1*`(aH{s;hb7O#d#2>s z(r3}fPIz`IA4{HH$;b9*ck^-L*|U7yefB#(O4^pR74MlTZRfYGwQ~|BvMsVnLOg7; zk;(P>r6O}z3zl#XaYC7ba z%QiELT$o?h%pGlJ%-YbY!JMFG-p4fA$EQZlDO_I&5p$U;y&L&ca|(@ah{L|BXcUQ? 
z$E4?hB=A9KsM4JgHguaTX%grfK}5u>7)vpJQxk!766A*%a-n{qV43dv!tI5stn_2- z!4~eKb#@i*)(B7_b~jYj(5^yfSKJ1u*>)FxUFa%nhhe|)Jh{#nUXU>2)o#Gfw$fIa z!qF#@N}MJ|o7Qq~!C+dQF-@i$XnK>`ESK3$=4u3c085e5<@*I~J68Z(L_Lgi3?taa zG?`P2y1LQy#t=TeagPF&Ne}F9Mt96gqC4v7SL|q%F0H4&120Qgn7i2NPj5Ub2eolz z{vvLaFUmiZpZu})QQA<|NSE7>^FOjYx!OEn9hIZHS6LOHKZMP2K)PfcB7oAX@;BuN z-nF`0S-ziTJyTGjHs$ZM5x*b;?>Qh^ht#OlI>>=<8_Ho+&ZHdmCrUJdh{uuInX@|& zP3%y94VA3P|299BDt>G=%5&7CCdsv$hd>%plapS^s>z|E6vk&w{+awh*C>V0HP3yTW%@ z_pChDmQrD)Urmaog!bRcv851(`n1xKEd*Rl;{R6{hVb~1a zz@Sx21IHbxmWCuIAc-`vF34o{aNCS{f_Huz^D{r~i!_)2H~?k{h^yd+k%bvSGQ0fe zrCM;2z|Pm*)RG1yFKJNH0Mnt82IUQ0AHzMRyuqXf-tKLOsyqZ{HTZ(t9P=}lWrT6d zSlS#_)hB5~sRCul()(n%K0*)OCu3BGp66Jqh#ufSH75dOtf__3!k|D+(v>7oUP)S*47Y`a z$^FbyYrmAQQXHKO8K}-BVQ4s)bTbJyznSzEsm#&?BJU@mueh(VwwG5;(v2ic25tcB zcuCg_85``{rAm^gCc8d>+8dL%Cqsqp$$OJS-7Lt>iyf+pp3eHv%BoN|u%hn64PsVq zYlQAYzxWTWY(tdW&C0D%qf#cO_};fkCZ;S&NnyNisKfUtjPX zdyK-DosTqammjLi|9TV@#nJwg$0f(Z3rlx^iH&^O=xxU(PvT1HB;+RlgO7AnZJG~0 z)>zUb4;zus$E55{K1C@vlKZ4Q7S|`G9~M@T&AE@q%}rUzxP{2{FCNQMZkw$7^m&}F zwV|^$Oxgcw~mcz+ffzuOChZ6=Jm&_)yH~Ek} z55Y8n_brrVp5sIM1JfQPmSpPr8dgXdwfHN_D2nA& z^2OxZLTK^Y7#hF*V3c3H|@FeIz}YE%H<2V~%kp^H|KC7&=h^vfVe!cJIS~`MkEQO#dUfJsm3H1^{NuB?;w8K$It~P11<9N!y9op0rydq*bEd`aEzafcc8r8M~M3NF?L* zMS2%3*r+>!vRKEj)v4I?@visL$&@6NBp6^|5_*fS(xE^*H)@^BsXmJ>i`5f_xv^CS zIu-j>yvCIze3GPRHD)Mqn&~mw(D$8+JsTVMzG>$_&i|0z(%IOHvA%aL6S@dlbty^` z1|=jJRdgja#DfxsBt%$;B$U@n#;We}Lm_2(FepG1wubSsb7S4zEKUUH#xAkJC9x|2 zen{-8*egN^Qzx+5z?kRbAH@^I*fJ)xB&2g{2h99P%?!tYLjn?G`o<-p-Qc_Gt5CZ` z`$MP=`$Gpbf-|%w3`9?~HawXe(l^v1SEB-RSij~3KF6}Usaime4Pdy?c59twEH@UU ziQRu-zXOHwhdxP&V*`zfDoZL)szqClL_j+Vk2W9rYPbSoxR!+FD|-?mVPrG0d<>*+p65{ zEPD%or?|zcYKnUqF?Hm?rj=@yLVj1N{S>%gOM;n1(hEjXCrFyi6~@V0AmX(^J(Yuy zg$lM?sFpFgD5Du=AsgpiwQLwpWW$n>3UVMsdIv}se!UP_OI7H%jw!68nm7;u5n=VHk76(5f%|4}q}BEYR)cI_+NfVn6EcVoirq5W47< zKld#+%yi4WnhUF|H62#|gszKZP~CR-^|;D@UZMJW$~;(-L63afUFLD&gpwvG_e|9U zQ$3Z2pd661t53%-@@&xY8$3Hr{31X+xS|R6dQNJBlb*|lU@st7*Dnph4NotxsL;zh 
z(5qFr;Th<4bva_&h(Z-@{;VAYvZ3G)lYa~|}e8mNYuL_RqB_VaZ;Aqu?d7G7OfLPgP8&|itwYjbF^(6)z(kstGPRg?`HAI!Z)KJnx zhGyn(=NX0V1-r05qju@^fS2^1x6=bc(tBQ~hy81vUbsK6n=H~Bn+1Vmv#vG71=*Im zrc;ECX67%+N6ZrBP-09T5yLbEWpW7=Qx%lt6WicA-8y|B2i)_O>Ca)G^0iwb-Uqgh)+{Tv(M1=`Aqip90tn@=M~}! zb#&jxzjiN2$iX8cmKv9}ite1F8dfyqWUr!OWkUovG(5(~5{M*rY5oDO+AS0sg z7En92y8!i~9klPu7398!Wjy*Z{Hdc)VUn|;^@7$e|Dy`Ewe^=8xDL&+{>ENX629FK zwXGeDGP{u&^(gcffQl24GcC|?TGptB&h8;Kt0ip{QZvLNA>A2EjzIEtmg>F|E6`tO z-DGxdf=46Br6KYOL{4J~aNf*v{-8n_Fcq5Baz`r$ceMJp6@p~JY(|!RU@^mlMqGc2 zvP{(SXe*uG(N@QK((yP%NqH2lDng}SRkXS&@R5ZTL{F}-7;o=7JNS?wOGD?;XOPj2 zRxNM&MN2PMWqlqnebI6U4-)QZd7u@HeV|oIYp<_ZM^T;~KsHs+RRW=Xco_p=K1NzTlC0m=&Z7Yb0q;$>9NHsx&yfTI>4s|nfo zBp07#;*&ghf`xewC#AYqx;2xA$fhAOYKW`?)nyiC{yIG+Q6?On^i@fEKbFP`?a$ep zi^Z$Gxu4~6Rr0eu^iUE(oe-fFA|(~#sun(fVrmKPHZxo5{>Qnbo zF3aFiZciQx?3p(-PYCDc9mzv;I+AxZFXm5H_iWRe!UZ%5y*K2U^06}^$*yPVwnypq z%E!#BSN`C9^=B^V3omv7VPN2ev%R*b`Ku1)+{;C`a1WdwU-3s=P9(d>_p?6}76C7d z06SWw@ZCyH%&pFqaAF@ zs>9I1FjNzU&W53@0$l^|9c-!8n*d7LDe^I7*g&-n9JGNN8#rqNR|#Be?CO5UX$BhP zu}72PPf3+N`qZ{&0({xJr}S?ac(3b@PKHg^UJ|oABQ2jK<&%``-ed{VCe+;*Ni{@b z4Ut@TY3P_tJ%#Lk*fLf9kOb-*+3+9_9%R6vWcQt{o1ov^M7N~PO^!F=r~^)o&}p)f z=`;!HGy&N|n@JgR20tZT*WjnhpMse3r=UP*Q9#hET5r{(Ib{ikq6mchFEzNyUFgCN zN+(T7CrwBvt!q0pw=+EW@eEK3mHqwG25dD~ynM4(~oS&*_sY3G3Pr1;yO91a$x@o92`3tcliWvu{v$I-isJzV#WR9~Cdzye+yC(lWhBr>z z?&lBURB})OIf>Amgfu6h=xN&;&~u(f1~!Rimzy(qx%to*2$H4#XmETW5ElB#dWNiKxTPjyDTW+m1K8 z%oDelo887~Vs*Rutd{mnO<3&!OJrj6FQJf`wlQFr$UI#A5RPC#B6O0<8cX-Pu}&-@ zyr~}gzl(DEQdVyVGSb^2NW3g5OZ~3O!JZ0Bz92%|Q*acRTkvJs(+rB+WkO z5vSwdn8)3KoWC$67o0boo~~NV4d-1W<^mumr-PMQmh!FpA*H~h=%Tx)C!$TMXNU&{ER}VLXM#sp)>1$bZ6@6-C)1=p7H7iy9-cQoAcgFVKJAm>ZZk<2Nc%kp7&u`%0qM@TFN~@ zq74d-KklwwO}I#}fs0AQdG2U<@&a>IYXSCbf#8;HWVKDRfoBbU2-eUq;C1)~2v5ff zwIF$EGVn_^PpxGk@T{k6!Fuv^w0W?$vmp74WZ=KhJW)$J;4p3nA|5L;LfXM5g z!Uc0dxzWKrkZ41g09c$V0=++|1IXVZ1ehzI1NcRTd{LN@kfxQ6tG15J;ytY{!G3Y2 z)SNFD#W6!-F_i$cA!Z=A6&XJ`W&_q-Iiq#}eV*&DS7Z9d)u6fxfYPJHnT{z0v<5_$rUu6bFg}u5Q@09E=29-9nT+89*9EF9wiJ*_#18Md615 
zK%0w5#nlaBx$zqPPwU$aW}&{_d=LwLyM?sA-O{wa-Lkd5-Ja6=b}Q2Qc57=_8+oeA zxKHC@x=-UL$77Z>IetOBUgup9zm529@te3ExGCWrj?&e`*%6$=!ZsFvEFRfC7XMv5 zJf6P;Cqw}+%=VhJekah$w%5r%56(mJXlaMy7x8Ern%<(&r4X99aJHBEl%(yFi6W-n znxBb7?H3 z0~=^t4{R{D0T5#wOlyGXX$@v;e3ZGt+LNc-dy*9wm}pkya3Fa)+B{MH8aRqmVbfww z3Y!XPdz%#98T1^Rx{`iwSXWi2Q%>V3au{kSuYthc=K*D{Avp#%7y(_QR3oS=8>~nJ z*^0EY@IX^%GcFr{!^;^Dc!$P=i~*Uz56B!sXTu@%IkdOfYfFNlrD@2|(zKZwsCF~8 zAmsCM#$Dc~ahHN{$1DYPgrJcOjm#WPuSHOehF>EPbZQhtSUBXCrNJ6yX%{n4vKO^n z80B&Xbm(nEua$wh#_!7 z;c`ZCCKd^cGY3#JAcVP-FbkrV3R;1eY<`?d5dN2JejtSZC7U0I5}fv%sijUeSeYR& z;wv*w(~PH)0ls2nOohWC78s|p08o3;39Fj4m2^rJ%b8=1g6gbMa%GjKGbOT;MH{v? ztHkm2Ui5Ed)ft~Yo`>pfh?NMqm2%J?{v7EG2z5nXqj#TF(GO88e)ufphA$RY7F$ZH zX&?>Y-5oZNvUnCgk(FEGp$p=}{R{fb5TZhn`JDI>nU^~1Q@$x@&ipE;LRvMTdS zKEBMX=1mpVnfo%~6ooB$|K@!E1vod$T#~f(2C70d0+`L= zr;CJt^?wPgtMUElyV5VToxRNV(3Oz*cU}qIAo@n=whrG8jZE-yBArWz&yfig34R#J z1S=9Ihl6-Qy^=f#hqBe6lqo5gSHDZopeZSHQhWlFeNM_!Y2DO{N6qa24D-kEV?Sux zCj?vf2|Z+Q{4i9J0P9pFOihH|Qxi8QBD6ViOcLV8B+X88edNNbZPK+Qw2f;?!|3Wc zj8hgkk0#&X?CVDIFkW*o3|b;IC1rYwtJ}x$T&HgE`%D?13c2G`ud{c&4li18&QJS> zKF;5yt)&y{k@TCq#N%fAaMs!38T)NdRH_bb$Fk(dhH7|Hye8!w{YO#y=TeSj*hj;J z2HS6;Bdo^8wLnb{P3KA6>7gYF;ZXnrXTU=U=mVj3DQHFKQlP`j&;hB2PfV;r9P0!l z`D84fVoG@MphV$s$zh?haWIAz!GT$YaNsE%&u9+G#*&2fH1qm|1&PSASjFW073q_q z*wjQVV|*%7g^Zg_HA)6ZjwPOF@tjYb$GNQ-wY|1xdu_k=Mi^*6P153MbBd$2`J{pC zh+UJkgU-1-lEx&XeO*u8-2jho(#q2D7@n~hy<8FrKTtE zNrvz}$-Uvtt9Cay$y+r}f*YZ-^vUT6O@<7F7H6zukG76A;3cGp8YbnyYVZlGQ#&qY z3`+?kmjTpmQWs5)=dK5h?XKsV4`q7IcgGx&yW=~9c@Syry*;)Jwp%=-|B9Gee^iYJ zZ)hFW0D308q7Hb!W$0Tt8o0WlVf68B^`S-F@eT6hF~~o{50j1e&+~(5o`1bxh}QcT z#2|YMVya@$P)^5u7vp*#&HKBU69J%41mwtfdF-56S2v*N#108U){x*tp7`F#X*OGY3Z7!QF9;}6C|z(G#j^azGblzKs4 z>OI4Wpv)4OOZBODtIyxnc*0d68)b1(!T#~z#7BZMVFSk%pZ~w3IL^dJf)sg!x=251 z?5N7TpW_8qeU7w{p*z0)e&&lxcGgOAuK}E+y!(7?ZFhVJKEevQVc$JV6Xm47j#hE2UM;BK)i>utX)Q56f>RaIx$IcZj#SV~zQtY5r z>p&TrcdO4czSWsjLW5N9p)zAN~KH-alptACxEPvCJFL%v(D>W7rVWpSDsH6n4?(kp4T zM_S|Q{W?A{B>G@y2^59|Q?ahqL32Nh+r?;x|b%?840TZ~g4SP+15 
zmIv1JL3x7S!MtH-gn2VIyBW|nTNtsKbZ{MN*R!ZjaAhnS&dS(zJmbGE_5@v}Pq1g~ zjQ(w7*kRXp!4vcgLb? z?~Xkl8{Hx=#oh+`ahr`t?;T~cIgnY~W-DX2#)@&a#vYA@aZd2sf_94X1bv%%gX0Yq zl)cyaL0AlP4y?aiF`#J}dJ>(DDv-Ux89lvbyt1-UyRv6GF<8xX&SE^K(zBe)iCFFw z2K$9lx)n0CSJ^+6rJ(j~I@MK=5x& zSIctC{JsdP6+Pqn1LB`q;lxnR?j5Qb_G=DKh#jM`fx!l)o2>9hXhf$Lt`9$|P%Zu* z1^3e%4*b30xb1L#=&4Y*p&_&u{os=k{vjWs_pQcl3$nXe894UwzLis`kK#%K>*f|~O8OUkOK+gz|>jM~Zgl8ec z3q6ZaO#l)Ac{l_n`nNx`$3E{%i28 z=VLw9{K~95^ue8=X3-CpNy-Z-R`pl|<(A%&E@a{*Q1(_F8+xl@ z3c+FEas2@aPJq4$hgl(5O>wQ{MGZ4(a?VTC=QMXmD=Yu;H%Ft1Jn+J}o6eHEvnDl0 z+nLmOe`9HEYJcON*}(VA9?Ofk$L1}~LuhH<+yaE=7OaOOr&?cdkfDPG$FzfIlw63N z)Y#K;h_C$+%uX9`YfPWNZH@OePSuBG+9hWV(U*nC{DPp`hlEr0i5ZDAhi4>if8%2X z;E&WalaTr(YomDi4QsTz5o}47X|JByquD)7$lf5qp*f3k5G=_Zor}lVyn_Wac(~fu z6*iK>a3_1TFdVlx!bVUS+9ZYHea#4q#g5$_iwvjf6)J`-^Kr78I z_$bRh(}9CrCmMc9*HlETaV z$9%cpnzMs276U+)Gd9oQr9gC{a0ZAeT4mPmEEeZ(lrr?ZW(q{o?+nvd<}tQBe~x(N z&B>ozz?JX0sFbzKSB+~3UEsh)#XCcntvSd&)j)_EV5Y4TR^xI{z^Umf7PU|0_Mm@O zkGv6ZnF~eYFCe}KtKSzG-%;WPVaf8YSVYaqKSlpKNI8|yquHuQ!2tLH5HX-&JQ3py zCTN6V1Is0_Q1B(PK(J5%5jL{2Zm~DLrABc`9TmHkt3F#Zw}lsPY)WO(3#83ObOh?6 zy3Tbd435_utmFF7I?Z4Yj#^<#hO0ejz$k<`pOA?JI@$@~Wy=03>juw2+|ZJ-vM3Cg zQfSFsmx$i2s1K&7`tZ@Xzk)*j_?N5!zkHAHQ|}FPu=s~LmN__WUFJCEaQ>h|9dq<> za^s_ibBdE2A5*Zq&y9~QxY-9AAH1^1<@^zR-CT=Y&L3mh-F3+2{9ibW?|SHR{yjQ( z*97JK3H#g)%K02#R@B$Z`BOONs&m}M#@}1gM}LY_u2g1Ha#b?bsY*VUOnr_e-%F-G z_mYRFP@myA9Z7u_rtC_gKD$ybr%<2EDJ7}YrzCZHD)pJ3x*?VNY)JhkmHK>x3r(rd zqtr1CsLz-N%NkIhWexT>pg#K>TyH>qt~VGcC%6Zu%}k>nGt)MuQIAb&CuM!)WLl4O z>d_;8Tsrj_m%bvMdaOu4m`*(oVwsA1+)N*wK|Ka%%*~)4b2GlopdMdloXemd=Q8?a zQjb2F^D?Q&yv)^^)MGVj0rWrttEuigZpUv4&2iUw9RE(JmwT!^3VPMXxqLsD*RrYZ zIgCf^7wA?3{TI(+NI9{{7l)l0Z2)KoWL?$6yCU_VRnF5JSLbl9m zcd5tm2Kf)Tv9j_yWv+EsyX6)nwS|z#BxK91=C1zhlqrzNBxK7xnWY(nbg0c<9|sfNUkigmqL{3oj=QRoidv2vRL zS{@KlPq#EMO!i|#flqB4=(pD2{aE5*5(5d>-~VqLYamIMt~6?Y;AFt@nmUQ3knBfR zh$tlcwx#@l_6J}I1Bs|W$i^DTh*H$evQ+rQP68P z&X#!=Cq#AW3nVfL*)sbF#|CxHFAYu#I(|p~p27ankjNy?mf1g8%6RB0kjNxt%bXcp 
z7t}3hUT{kg^%XKF1!o3fPa6v3Y?(8I^B51A0=;2FBJ+pL^MoYy`S0k?{vGQl8Z|A+ z@mn=5X>79V-73p!{XQ<)>VRIfgla0kjNb8E|-4I)>2@U%z~RmEAx1ag08PL@;Jh5!HUO&f;$__9 z#?s@H|3Po(2bTKfuU#KhS?axyT^|%Xp!OsO!VYM?2|%$EN?#zx37rQyfpJ0Q1pumC z(D(o`3JTvRMnT^S7cg$9yAD8=8`_>B#sg(b6fhoWIu}5(7mDs7#`{-k`mSgS6H`-I zSZfLcYfWKattrf_HHCGxrZBG76t>lx!n9gbSXOHa!)i@oSFI_`sx^gGwWcsC9N)hG zpZeL)Fa>+cDcWeEhITXe!(ZfkPeG>e@;ipv$hy#Gwuh#y{WyEyh3KP#X^8A8=9|T%Paw zXHoN4p*j9gZ9J$m9#j|)YKsR|#e;g{K_&5^hImjtJg6HUR16Plg$Gr_<1l7iXvp8A z2$iHV&i5>6<6C@kLv`4Ay#Jy=LuZGYq z8c3#3-?<@0CI-DdGHF5g*xQ54`d(*G^{Ho)2lCLl_V#?rDUA(bqM{HBoX8Uhv0jZ6 z-bs{#PFCU8KR|Cv}$!Qz(5eP$)LM(8ig+Pe)YMk(H_CSgZSUWs2m4f6Qo*QN=wbygW$lJl?mbWNb zAPg8v?Ilj+34~a$#>q795~aLNx*LSvU``3q5fIrEBxV5ydYc)&0nbK8Bb8#hGNOD4 zjqvILxsP|S7dm)|>%&+Zk`nDhoai8sl&wHgqJt^rEn0Adce0lyKxma$m*_a}5|i2p z?>NRHU4f)T#}Oy;1d_59NJ?}GrJ$}TwTjSN%qJnv&XGGoN`VUWCiA(wUyg5z9CX;&UtTo zSr(Q$=Y8Ns{X(s|;>GTj$Uf)2!dRptkd*Be;zYJUQpy4eZ@F&_c^hO!rQ$X4zrry* zPK^PDIBUAS0>jQW^adxUV|5*?18UY%L-N|Q-}jA=rG_cC?>=S2B1e5UecBHDeMcD! zEd-KUaFjT)gFsRr1QOnxlp?kLKB0G53kccmTCh?L1d{50-`CG?x9fiXp?+N}`|*lX zDa+6Nqx`JVaCG#4##kGYdhi)>Qr`uV8X=JIj-nK)?^FG={U|HsbgF-YA7$?+^aVWq z`7$t-bSP&7y=n!58uz9Z4B4?ZBxU~vrAU3>KuGF4A=9n}DeYPysqY*7yCoX>@AiM? 
zhjj!x@7E=D(0`ITco=(-u{I>_`XF&q69odVR|zET`lKIHBzNcixBM&txUBmJ#pn{f z$}L5yZF)a_zZ z%#IjcqMKsQN>2T16Dg&n&c<9O4kaqMH&~)37A-d-$7^8Saxa6+Zn=bY%N0mk?qyuT zB=KmuT83`9J$aMTzcWvS?4GATHz=j$5)!N%V~Bpigq@D)7fj%;=odub9_SZD;7aui z9&lsz3m$Ou^a~oGEx~<3luIn3oqPfF#sb1io4jW3WbA8JTu9^aI-F$3Lf$(4atCN% z@arJ#CD1*-n4%{*68x6D$jd(5S&vv7l6rH5IK+BEr(+Eyyx#^PMN0G@A!#3k?4Cfd zsO>&@G7BXJgnPlhaduD8H*QEAN*em~jjM>$Eo5Na$T(el`_fedwS=*Nq=gJ5B(+x{ zsY3!u3mHi%(k?3Emc;2-Ux8#PW=h;VBe^1O3S%KzAgTFNh!e>IA=aw|65e@~Cw<-$ zLhrDx2=rU#P9W(B1bW-TwM$Fl*2l4*K<3uR?Gg~_?TR}Vr(46WxN~v3?jDFcL5b)I zcEufFtPRQNbbvVN2?Ub1B9PSk6O;mNFrLNTjAL04`Yc|T?UlHG@jA70aaR}%4Fr<1 zy+WMG69}QV_X7--D42eJ%&K& zu`G2dz6Z3!F%t9`T88d1dW7KQC~b?7J=XU_UUZK^$R6uQho+GhWBusRgb;Fol{m`M zwPgZh-{x2^xVJghn^@R=T4+ItrAFvW%7#Vegy6p|J~V9V>)ND*6$!eeW+W_1 zfDRtU&S0z!NxPmwoai8s)I@=#T`!^($=!+s9668{z9OL}L6_*7gu^Da6$xt?3k?L4 z5?w=_$P-A)Rv;Rv;= zd;Y0UlW{%@3uKd$=i>s`Dhr!I_>dMC$&TT+OkfmJL!y?%kY`XymsCQ~fDsWgNn}Dr1-~g@0<{NNWBs+rbH{bx>!3G>~SR@

NW zN$ej22P5`K?~x81pmEqk1P(@Afn7x403F0WB5*L`P3$BB2WW7{;0)kk#JSi_1P;)b z*iQrwMm&cdMc@GS$?TH}9LzZnyNba5HEYq-o{eo_+UPV?cWiZ}Ed_weyD4oSF%IZ{ zofs_drsF%AY;5DwX9GaxU6H{E zJkWS(CNN$oyc7T`?_XRy7k#lNE&6g*TJ+_*wCKxqY0;PK(xNZdrA1$^ON+i-mll1w zE-m_UU0U?zy0qxab!pL;>(Zhx*QG^Yu1kx)T$i32Z(*fB`iMVfA>Z@!j8D+9%6bw0 z2QMym!@Xm%d!1K%4XyK*@>;)Af0eoZugZTV28b&$qj|CI3pnQ#c!cjJsYk({{P11R zxH5gV6HeZ1;(%WhS8Wj0ac7A*8+XYdF2!Nmi0jnikD7CSN8|AkGNc~brN8PHtu~#h zp74vx!t>}xp6~B=bW#0v7T_qc&igH0u)oD32x9yBhw=WwVLC&ImJD^VGX#WWs0%Vw z!jJKlFdJR$Y#>^)(Z$XN5Rwg?ozd9<;%Gdki(N1;lx)B=BU*LOhOoU?I5?40LtW+c z^xDab{TRL#g9Dia4hJvF%bdCx*BkR*S(nAI9RAf3g=-UlB<(z}hIsq8x-HE#!0Z7^)8SVcZ-!21UMa&uvNa~2`8+#ZZ zGeeBn2#AZpG5BI25VPXeX<&2QMGc7gK+-#Ydc2VAj>jF)pk+4zPk0o|6I)L`TF-RB zjcO(;60LOI)k7gT&bt~%_0?+BD`JaTCpt%EV=F!Bqjg6xT5~c{aiFO-9??p}TTQO` z5W_yEk!>tnd{{1P9vuH-!sYS*nN|DslfO3WKbY!)r=*Ifq>88Ljjy_)o@%4=Zv+bK zQWx;#Bx;nhy0E%+IYigyL(U#9*GCwRd$@MF5MIlDkXO_t&mga$2YJ`?{LXssWgWik zy}|GozP80Ev{(PK2sXWoW~u`R#UX>rh`zy@BCRE@hwJu z2Cz8hBo88-j2VUMjPUGWFH~9p{o-b8U^bU@aFt`+QXO8(Y68EuxQnR6cX$+|{$G?q zUq)Z|au?^vKGkKHOFkC1)^h{5tWN(3mbxsT3lrObjHZ!@QrJKMT0ww|LFAh}%(no{ zIf42W_?xkwzG0g+#E{vr$W=DtBUSVT9n8OQY;<_vgJUUNE7eBF7Do`RI6L2$U8qr8 z9NU5VI2yIrafh6D9FHQLkI>20;XLXdKzHl`xL_olz!1-HPaU$=o)aWH;W-~6JC7T3 z>yRB{MT9@R_ntQ_?Vk5LZymBRzIi^7&GS{EULccIzI_bu^PSbAgIbu-DOB zZY}EVti-7Vb<{nA{^9WWUhP3}HEv#E@Cxo2M(~jLk(a?oUhI3d!{${#IZU)6W`rE} zn8}Yv;w!DOYbCgrhdyusGWM_pnR7Dt36}9t#^B&M362ZSU~p~lG=p}IkQ2}KB%L2+ zU39w2&i+}A^=!fI$N;Y3Y9;~h$4m(buqyCKfG6``wfO;PegK*ufabsY=O^={Q_$%H z(EI>2KLE{-{vbR-h-kmgftF1cJU->Ap_hdmPR%y)W0|e769b?V1E3QFpcAW!Bvyyt z(OFc*LsG@}qG2Y4$cMq~iK(8nQw5+?1)x&}pi@2h#8h2n7j1q3nje7X2cY>ceh+`~ zN5;eeG(P~%4?y$7AMlC#_qO=~Xnp{iAAsiX{XP7pHa`H(4?yz+(EO#3cekDg;fl&L z%%N{kTQL(F;`su#6~HN+t`p!IZch=QIPe+Ia(xyUt-}ifHwCEEem#ql?G}K$BUR%W ztVoB|X?j|pRu2?H58&d8P%n2MH$qGOn^4JWN%4fjGRUTOa9n@np2cx|m%oPN^0AnU zF?du2z7B*3QqPOE+5RtHe2Ss&WA|Bjri2OE*ki}?^RHv^`Bwza<3@ReR{GYmw^-}j z=0j++e+MSnSnquN7uDjQWl!m6LY4K9_&mHhG*MYQnA#;%$wLDrbwM}g0 
z?TYU)y?_%$t$u(T`d`S#S5v_IJR(g3p5m}7eBX`3?F%Ziu#O7FgEcj;e|SNKF0jZh zaG36hVfwEAaIf_a6tLgaL`4M5bt?mk9A31m>h_8y{OXtP-hB&^U zm2f;k@hkT7345&er4C9}D^ky-snYZv>7Y-)lCDN)oXS#V4SO`g0p&&&jnuYA_p()G z&gC53TaDcm5yM>tDXWGVYlO zc<~>v61xQ0%666Q)TVa3aR$5nw)WQl!4Mv{Hovg|PyfEy+l>NmyYIQ-K~;vGf^yGD z58R`4bLsCzTgu9xcmv;LTVxo+T(Uy?|d)5!MX+D zMfkN~81Lh=RE2M%4ef^dy#umRS#V!4Cq7UG+C;XrfY=lFq_|nU$ zfSQ31xbUk!b7S#Y8+@Q=C%?_Z&+=faFZQ!o`Rt7yb1)Vkxd}%f34I;XUl=M&Doetr zg;s$_ko=&Ed^_dwa5=t1CtsnHZ^2pk%-qFP^(`)8kWa>i!*^5FBwT`ky*7NuP4&YE z+f-%x{&akjtycyvP2k5HN9NDZS5^5n`3zS!R*M_2Zmc%&hTXl5k2Y2(8eeFvt~b8d zSoO&6k1y(FkH}UP+0(Ms=h=(1)he9nQD0{7%~nUUPh_j}+1Inx-RvGYs$WiN4y3Mw zRQdK|P5!q4YjV!!sLMIGa@50|Pjc0O+@ZN@bnc{FH8Xd9u3DD6CRc6dy~YP}kLRk> zxtDX*&D@8%s(0RiJT)Y5be@`+H#1Mo%UhPGs`57HsTzEQ5mmBqS|Pqay1r0VHmSl+ z;H|>qCTe_>$|lw$^c>+@w9dKC34hX^PTmx`+bI`E;zR7t3r@ugBH>?v8zEJ1*B~s( zSHK+>Zill0R=O%(a=Rnk6L5c{+UGg~>~VaP1)q<30I&rA+y?Sv&ynJpL(OqG+l6_@}um7V{tFXP6IAB zM8fVJZpE7n(Ve&tyVWuG3AZ|fvk7!!yy#ZnvMD^o&4h4`fMcQ>jSC1ZHQ6)Sqj=*W zGJ&?Cc(Wk1@oe+JS#J-(GoCXZbq~&UsMFqFUNn#a0Ec^rdsPK)0YruHPWP%=-g#cN z7+3yT>I?4|Ud229u>J1cnE7&$2km*pWwBgZ6-Nm7$=QJzp* zYIV~3B(*K+>m+psx6i=JDJN3oS}RMgu~MbDW(&oeIw@7n!@XCi$+-In*BnjFM2C_- zHeGHnLao3mwz?y2>t-e{wB@a~mMY7t%va+JtDjQ4pQ>(#t{T_RuWCKIjrtrn?qYj6 z>yw6Ra6^1ypt9w#)<9MR`E}N%EQF7u`##?CVoPcdVO8cH|MuW4o(H9&)_B6uf=aFdK#jc}AG@9HT2PwLdEN>aosTT%S)d1jm_1P-@#2T zHc{7bo3f6UtBZBaW_-XH$SKVMj8*?uPSHRp1$UO<+@QFI%#R0f%Rb%|R z87PlcGjPoSI;GedvA8B?Vo*&C&JW6+0b%5jI^pB7xN4vn&2)ZnRS>VP8v(8gt_|X1 znTMBGP`xGE7BQk_p!mlHotQX|+uj883YpsLcYrm14=MPbzV z{^(DWMkJy40pnEMXoY)@cBA(hlQcC67hCN}QoC`x6~00GC`lDJsA-_Crj19hkx>P? 
zi@!hm8a`jD9+9ymd04Vql)MZb)T5-5WL%N8C>a}>hmuuIgQ_(2LDlK13ildulk-V2 zyg^GY_(H8GI0yUQ)-iBCa_2I`_sWCY9qLr$`u@|k>Nt}DpO7+9R z7p>H6w0NylHIBY$rM|`47p>F?j`pI|N}PVtO4Z=_i&m;P$9GX`Iu5{SrM|=o7z>9T zcw$5ji^w?|fFr#|Gs2hVyy*gvV)-iEM&JT5Vh zM!u6GAVw-2fmo&F_yeXu zcmq}yILyf-p8`$=_^G?hjpLeY0hYN(xN*voGl~(Ic;M(IClfr?fET|Rm>YZ=0ZTpQ zoLv)J?ip)?L;yP%SOMqaGy-NSI4yvA#SMUaBj90AjYmBDF>rFuK_`Ds%5M<7YlC-X zCPF`aH4l#R_?S=xl+Kr)^4PHDd_;r%3veB<0Fpi{Z^8xM5IkU4PFHm}t@`@I&jsG&nC$yXZ7kzJFcDY*XTWV8lQ-WC zn|2BO)zvy-TjE428>E*wustv~ z2-w5l4`4rdgk#v43Xsm>1{}o^h;H6$8T_y@py@EFqOi05^ZklG+33Cf%WRMcV7CD) z;5N8wW2OgRYSjyGLKx&D;4S|Hzc@o<3hnO|V?eq;1LO;nfWzPft^J?XTsS0wi9i9%aXmKCL3>uqk;+L$xfx9uoD|8tCTXmx7?`*hs#O#ex^h!nrj5%UskHmaq zV;Kj6Z$Tj7efTY7@Q%4})Aom(vRVMIWWL@F58y_l1sooAxYPzq8KtqqvFrkHO+f9$ zVO-2u0^{tcaXN~;YXUn1IG1}Ype|t1gyGE$lVP)tB6uutA%NwSVhq5?aO_u(-O zD_F9Pik%j#s&If76Gshh0yrynek>N2_i+*83c~Yam&L+!`c^E?y_RC!UB*|^(%^91 z@QgF5Xr>T=b@(G7y#fYx3r^OUI)YXW#{xXPspkbTgs&42V>*4RiCsc)3&15D={1&~ z(&{ptmgyiJ2EWEejkq%7JtKJ82I<=jEGGa07RU7kNKat=S!ILkTu&V;LW)Eaf;rxXcuuk;|)jzz@fkjI22B|+TT`v4kuZxTfp_! 
zfYac6E6$;Gf!!FdzJ|vtT*o5dM!3xyg6;8p_zFUTJ@JQa?BV#YZ7kzJa03Ja-h-bWfRyjpcQZo95?APi7T1+ z!u1gqHv(=-*q$H`k(je3?6pBULjrpmv;tm)OC+YA2^SN@^^XpZj}p`Xcr#k+ZUlUk z&?nIl^hq3$C?|U9wm3Oat%lDbhJy$=8Lp0oc2?rNL~%q!^-Nr1vn@$nZDZ-02!d)z z5bzEh6frU+98dRrnq?zF@cGMO-ipQ)^ zCBYUwFzGvx^c6wUexFy}Lj^gJiK91twD87y2Cn!FR;?F3)jN->AK8)hOD87s0 zwhuNe_N0Ae(Bo3ZCv`4bnV}^0{3@n|F(na z$J4cc+s*U`>Ds?-aK^|C?cX*RcZq8Mwl6dGWN81ka~W4Nw0~Ql%z>GBai5qePHxe< z4TT4r)@>eK*tBjt;KQbMy8tIPty@2Mv1#3=z>Q7owibSDS~pn&5Z}88SO?I$jl}DN z)@?C-*|ctZ;LN6Vy9#eMt=mAjv#E)hGqi4MDem+@lh0q7uh!;IC{QyCDhsThR%EK~ zZXVv5cByB7rv9nKh5N70desB$ORCH4t%zOqaWQ-p4nEKRaqU4wX6k3PU-6!e;;A~Rv3Iu6CG}5%4?V4 zH7@$^H-^62?5!DJMC*#dm;P8&CzyIg-?94anGAe_m2TA-J>h63ze1r8S~dTw!^ez1 zKQegB_z@|j$KO8zA3}cD*U9fM4Sn@LYPyd4VRaU~2}C{q%g|Rg)=1k9y$$|nKhXH5 zCSTu2^qYx?=7ABZvdnqO-Cclu$a0-peTRvCL5a;xpdV)BTg|oLAJO#_zexsP)n14Dn0m6{334_W`byq(gP&b* zv7=&T3#Rs;^M=0El&`&o{Sf#BXPULNPCbl)0crp0-)K1>7=1DYUt2aqEr3rz|7?xd zakl;c-pC1c(fDOX&c7IZjqxc+GWb^vzFn54*A9l3^|ryk`*V$NZ|J)ke75O_IX?0C zM_bM_I&AkpeFd*^(SOH30l&b=2{qN!r%bxouNM2XGxgcF=U#(<*Wm4RYYg7{xt3$M zuN%OJ(4T~MMQdYO|6|I_;srtY+4R`&(&-vG_?&|L{HRAPuXbi&e-HfpWg2{So(|jn zLy^H({z2n^hcJK78T`8gbvS77|AHs=wEI0BYgxZC^!ALVz3C8NHu#zxO=;WhKLl@c zN1oV`7X9CSR^#mYJj&qPy{7ShZ|pza;H!V8@xMX;!r!VV$j9;=Xwf_DLe2Gqs2BBJLtG6x3)HhqsAcN2TK-05b z@i!KDj*ph9&-VCez5l^~wtU_CpzEt|zG40L)o!o;-g@`_xBs(Sm$%;d(0cX#H~;HD z+aOv4U0(0@Vas=4eZO1FBJ1`4djAbX{PykN|GrDtH{Se@H^9>Et@o|B|MTr`Z@>Ec zE^ocs^(~wBwco$`yEodr266*+dHuis=*8CG+9OQ(9WCb_jcTQGsDch&Ayz(KOh7=34&fc3b~881ch9L#awa_vfxGV z;z2|&#$R<;b!OUqAlR@|{ZYTF>gwux-3^Ip8sE`mqj9Vtz{Dx|Op%mnR*XnPU5@o2 zZp`Hc)s|8#t5&oR@-o>|`r3Qji(4DZO6#@lxAn@pUcIrnwW8Or-B5aM^ZL@_roO(q zdUIt*?<_8Du4pi3O_|JJ)Je$3RE*`WwfQo)s0;5}_^bqRFneb^( zkEWGK_H?qlr-yO&ba7=dGIjwXxQO%Ywn5~k=0L}JwHvF|&BZmH>(09uo~T(FS!Cr5%N0x>CJ1ua>t_smpbrb#vRYA!4wDz8Hvf>Y=ahKW!a`tHWcY@qUnQWNVO)C-Z^aR_>Z8LbmE^{~i!JC7o;YFnkATxCP zlkAjPy6E-u^cg~CVE3h@7aO1~OPjieAIC_Yl&{S}m1hZ-YjE4trI$BA;rB1lE)w17}Y+uDsF88um1^costwaH@waIMN)l8F2iwdMd{n~hPXashfQ7+hvmD_s0vYi;-))GsXY;()G^0l{qPI 
z^0`>()j+sEuW)|^+Qr{xywI}&o{vGkcM&x@BJ#0J(S4VSP`>84PJH<|>DzQ{{Nu?J zy5`$*#CW0SxF5$-TSEKkIzV@P!T-s4p+`J`W1H12_?N6SrQQc6h~OU(BZ|=5fx$sj zCE$J1v44pb2z}j3qlZ{mM32x4-G6uRA2D7ijZ+EjcfIc2kk&-x{l<7tnNjqI^QZd& z;g^{2_&HY*dYT0j{zd$S_)pMBK4G^#Jzw)Vo1Y)R;Yt`kchKF%&oNGD6*!!S{W1R2 z05A76p~=Urn8{qfsXj-$t6$!Mgm#`iEi$zKOU8@;)fbk{zZ0y>1(v(7G8OU_Dx_cV z@=hxBc^P0psQ>E#|L#4oKU%Bt_X9DRV{=y9W z&HIk?8(h`9CB$Eufj|9|<9tX7rc*-v8;lSA_w&SU+_&V1hGhs|=sPp;3wIp;M`4_S zA2PmUqOCr2*Wshj`XhljwLb&D@QRD5QFcJqzWQp zw2XADP^9zABkNF%oU8;41a?N21Ux)Y^fIQl=FS!bjI2zY1pn(m(Tn}Fb~be+pck_? zbT$<+HMTP`h2rCba&mSwHMD_p-@MV1cG_Y?=si&Ez7Lk8J5Ac_Npe+Gzg}5JXL;?^k@b=5P_JyvWx8mg=9j;j!NIQmq`Gp}%^*L} zH6*vTTs(PP-)*vRR@D3*VC@v@>AOkip)m`?98tmeC=) zJ0tDwld7hLddl}WTe`fI^6y6f5Iej2LhZ6v_uxmKQ_Err&d!<5v>|P)Wjq`NlD&yI z!e07A;{tmLv9yGH$t7gA>+aYt5(2{lfs-XkbW}hQa{gjW5U$7lo20 zM`u%YjlJ$mDru008sz+}M^o-@?&fepNBze(?5ewUDyZb74&Ag8I&q~$*I>}UYB=UN zbh4{fvo~|b*3M3HwKlVs?vq=oEh?k-lmWWGWg-jnGo$4YoJ6uXh;p-~ab9;*e z0Gbr%0t;dX1f_ld1HNp%Mm0n#ZE}{$gaks!N(~~yb+63EtbKL?0(llvLx6aUOGXxf zj-(L|Rs^iS(|`nW+fv?8E!#6^Fe(ZcPaY{zf@~&iz_xgyBCmel3ml)J&dQ3JsvTmq zJ)=6Dq#$w7t+mhC-o48>s-TAXMO;9%WNo3#F6gMa)%*lnQm~ZhXa>yJJihRD*d*V1A+Op_d zXWiP6IRw?GcLGV;9cF_$_^6N3Oc0e=5Q3IK(TLrfBXq7VCPpyjAF^16lRKW1O1g%- zwo)g$=HdMK;nldOIx8hNLVR#gPc0K z2IwVG4^1*PTPUJ8-hELgZ=p)Wn2^HRq~?76H?>QYTg6kBNlS z<|`$yX$RgH!NE{$byg zk*x?oc)@6xaIj@Sot=%x&rxRwrPP~M^eyt7!-+YpUYc7-f61T*UCDO3J}^W_@bpoq zIt21IY|(@A2Acw^l}qcrB-zN=AUg{7ID)R%rF(tCEvy`{0f7oMFQp!MHe>yixJ3t& zlK_cJKl?>+`)GtQ+6w@nQMY++@f(S5+&qf)>vv#Lq`OjJPTp_-% z#(dkuraBB9G$yR@e*|0{N`d;m}5Q0I6M(OHpu3qd|idRQ8!**r~A+%b#ko5L>{c_T|@XB#MbIRZuohW|+tM<-_j4hGi$!x?tY|2Ah@G`4KFR*`(x z)TfQ1mXCY&;i7?Jqni0RL}QA5q<#dHn?*#3KqC`xe0|mwwB?qJlWeO1kk~|Lsx_T9 zn&=ZjG#3MLFr^c^Nhv8r5;KdLNFEk)N&HYI1f)2W5|dq-OC&hGX)DejATYksC9x;M zH!T<6gZ6(412b{(Y6#C5-5-G# z1G-a>Nj%QX_793eFhE^L74H=yQ9y@5M28kpg2>?Gf`8#;{DV*cSoEfg0SXP>>r>SpfEjAQ z^844Ah(UC@@}*+CmcISlW&x6o@4(aqUZW=w?tQ>~-5F#M#&@59dky0O9sRQsD?mil znJf%2IjDtI!iL30d;vI#29Y=u7y&Rf1ra#@#%m22(iH<) 
z>1>cIMeYcuCL0Awv@6Zd?UPD{d=NNM)ywM$BW{H56@U1>%W5^a469;~FqP9?aUkKD z4nG88;;vIPRu{!F4}-C?fcGAe zNz)*AknLQ5XFtLx|IPm{D*f{IY^a&)x5<+A72Ne^^>kQ%*5=%Wy6tsSRKpGKsk^x- zKV{u8H+F52KM;S)+vwj#q1XS>)WtqNS&F*Zh+kRmxW<`J6=p5%)Kcm*Wg@@3tj$c(~sKdK7rN^^@Urf37TvXnq z`fz;8@poT)D|xA-{=j(_iA^tTMfoV!MQ*8zy45X7=jyi+u$A9vL{*0MTDBf-G*EJ@ zc3fTlCPbEM@Y-HEM7@qb`z&}1U6q&`=b{RqVRBa{2ixpwlbK zV{&60M{2(GmH6l_e9ZWk$`aE!{5)XMFeZ_{H?_1&nUT7VsI+cobO^1=(7` z*N#SKO_c5)( z_his9;JQnZ2WtJ3lbZ2@<%sXv#>?>Zw=z@qefaHNAFmI?n&da`=i@iB$3E4&XZ@FS z_V)Isxxs7u>DuBozG<7Vwtd<(=e=^eVTPPl(QK*$2^ z$zc(Yf@fMFH#QhG{g4ybpeb{8S)WXIT#Kg ztE^Zy7lV>f@6G8i<*>~qf);{16T{~r;0&{2PhaS4Mymnw41!w*RM;nE>)gMZS|{f$ za`&7cS_tczIe9^?=9NwH@hd#E(En!4hFljhsV)&O{P3^$VxU$8L}4id!YnhA(__R^ z1}!9&W+5_U*p6Dzv^y((30x#f<|&kDY5m6nB`VR83g^^nxe6u&kM0O)&`{F&MtxOQ znbU_}als^%o=(^7qf~CcxNgY8=z8L)AaL@1P(w@(($iWbvh=L(2dY95qvq^OHsBWR zU5$Cj*8D#&(~fA=ctQ3o6w7wlt5ZN9in!Cl)5HV?bJ9J1F^>=kf3(Y-~e@6>* zmY37(gFhU?cD6@kN!HB@--)WOxa)6>AJ~n(57|^Zg*4xcv-|@V{>Rtg?~DWZ7z4ai^j z^pbRCW%<&4ffs~x>+)^|- zQ*@*5JCVqy9fo-k9PFvSx+|LsPXFx0u0gmRu(EYsea^cKYWPiUvc*koZl;$=P8hHd z=h_OAxt@Q?ewxQ1H<0__TpS|30woRtWIpvwj}fnD-8KD_+0#l9_E6RWmXA@o`&rOv z((~x<$rEP-4Bfdgcm@r79aM6PcVs1vT-oaD()lAM#N~G2;nNj1q6nSk3#xx;rTy(w`U{ZW~+tHL0 zs6POd zLL0a}mpK10e*+8>Emqvbcy4;O>m4Y$;wV&IE{6bNq*sn{+zF&Uy4JNYwk8LB`#A5W zHm3Ts#1}5pmnc_ygG>Jn%>oYCo&*HkMG#G)E4Rw!1@9#ioUTAE_K>^j4C43PJ06&Y%uL*GyrYHibW`7N6>Bhv(Nww46K-`L?37k zkL)JD&HZn1E$=uU3_Fxk%cu@7tALN)j9L=w5;+J;RNt2u)Uva+hZ566z_bS8V}3(W zq8`(C&gvY6t&>^@bAgL+j~J6#3}QQ+Q|DO=>j?InnD$r^My}LEIH?o6*qSiKXz9?1 zJVgQS=TcNHWZ8{Zm+i!mzhrN9TlnCZsYjzRGz!UI%O?&uCuBgcvVKi{uh$+&n)MD& zx9;l`jn7SV@?)X$$OcC>n@G^024Oo|PQTmh394wF8?sZCSkjWCwf>28KJ-8#o`V#) zNFMiKrjE$0QM=F;I7mwOHY0X>u5J}OUStYeplOp1KW7_HA%+_edYgMnX%orIE|k`} zGp#*J*c8Ihh!O-1VN;GqWavQS(CDlJSY6ND=>$%lZ^dUV7X?&>Y)l;Hc036jbJqz$ zSyL#Jt~pGoBVt$UJf72cw<-eawqz!7q2ze!k5#Eq9P`EfB_~Z%FeqXX#qGC1%37s}ux&glI0ZL$^#7h{D zP%bB`>hyMBMiU3e$>DMvH+wAFWCCTzlxUPKrB~GscjPjOqPK~9RLP6S+vWv;`dGyY 
zF-`+nK%JUtP#NT*)JD|PD_l&lQYaFA49jU%LkkkrEglyT*{gpuSt~bM%e3NzVd{rv zs;uAV9Df$OfP4Cf!d?bH@Yu7bqjANDn=Iv?P>^^PAv@FP>5;)NFp#FwxfVFE9G_)!ORDz zvkAp3j;p8K4w;|+`BiDgekmQfpFtXwmpPK6Z&_`D-Pl;P0PE@>ST>3;lYTKe&~oyR z_u)4<@)m1RYGptte*6nhN@UeEU#=YwS&Ob<-;?IIZhh4`r9_ZRZ_#)66pwQs^^?!r zE2xHkLRaX*t{1R8r0yarQINix+Vqhn<(PnCNK;jP`WrmoG7hfz1Og?8#|{CsCz4O- zh&YAY_RQU-l;k-j&u%Q%OpoCCQV7_`Y%`8<*Xu{>SYasrUHb)k7X?C+xh`1bxr*hK zWuc25$`Ir;dg#zTnj{1m$X}_Q^frq1ps%Q<{6S;jO8g9WA;#D@U7B)$I{n}K%$0&0 zbNgDiHAz~bGhyo(F4dnU@WX#F-=D*04{_@Q_o~cKS&Grk@vGdRVvFvJ1%BoJw&5;B zp~2CCv-+P5*~Pm8kXjt7T89cX*^&hw`>v=Sv24RGT>HRzxsgvZfnEaWuIesU$dNPt z8TDU86YEBF0-(OLf%#Km+GI{9Q4$5RkQMF7Mkjg3kqZK+b5jC5c}}6Tc$d}(4RNy3#`7zzaGe02T%m_ z2O@{6;9dZv%mo+iQR7jw`Hr6i+QVxSBrD?S{J5jmLdhhuUG5vOg+NL#TcQPqJ^m)9 zO7IQjz4{R6qO6bgNT{v3F}(`3cLUO~EAR}-?c&M1XqhC*Gth5xN6iz>KzU;diXGO^ z`{Y=o8PffY>~%w#)@3@vbTX@{!hODTrZ9d^{msa7=s74#zdr2&Xv z4zKtY;SDO}2qcujX&a0q?TEkG4#7lM$b?Ec2yx0#$Gf?m7zM5rFK*C!L5_WaM9KCa zO17m;q70eStd5PilfpiV)QVgL4k?tY)OlMt_GTl}Q$n?)#XH4}2SBKQxaSJclY{^2 z-50qj*u20)l&SStmvJ&IICcPJ)X+D|X?})(%CD3m5(JIk|Y{-ALNP!NpJgqfeEt7gu zi)c!=X^e=H$qgT{0|lAHfmynp;Y+0QK4o}Ph!86lUt=MKt%#M~dXnQe6Fr zY;h$1rI%!WkGHQuDW;Jy=L1rzfo(#vO!{$8x`ANPAz8KZCirfWdmZiZ?p0nr4%5H( zk@A@>Ag9EiyV+gomljQzu+`9{`vQE(YiAddJQlFTu1Yz^A+rz~4*cur{*W+&AetYC zD_r4@a{id-uJo9+|1+hdHsg3N4h4vb3fC5&9@$kb&Q*(NIeg4jlUn>8suK$j*8d(< zz+%(2LmQ}CBPAzR#(Yk9{hn5FK|69obXV;!Log40*T8@Ihs@Ad~H zp(u#5AMynXRr9SG@S8cWDeNF{IL2Bdkc9+~amebpCPnc%*u;0gE_Y;QRgmng@^DEf zdC>JAR#n*spxibbXe)62C+Y8x!ar6vvJKs6MHq1h4!RaygR4W%xJ|t;CV1dj*F#>z z8edL>;Z-uZqQt566^qD2*piFOm zIn!T_YJF^4tPU|qEl|F>pu7fY~WKKP^(!8c3Px|()aE(?I(&dfURaUxy%GUcS z0CMRzHM&qGF6Ld^LCTb?l_gc1c>B_yz(M(|Xf!u^>WWgb2CHO^zM~fNvo%Y~yOgCH z`LZ&~Z#ICy0{tT3EZ$h=AG+|2d5JsH8`@Xi*)Brp+eyGudM_@1S%Pe2iardjQ?nIm zo3G$MG+c%&(I2C5mW#-%(st&se!#ePTvb{pV{n>ieCRa^ngSZoi0Ifbw)^bz@{)?( z$SJ&t!BBSA${fhc@0^PSV>4*@FXt*)lE*Z3mIPzk$13BkwAy6Z0>l{-jc1#euWUo+ zY!#WOIgxsRlSkOAJP%li&QMMZKVJD^Y}%h6f+sv)-Z&`nyL7{rupqO4Gyz?^GAiES 
zDXf?iF_!ZPGxbj*9%TvK?3QstIQyOoLP9KhC5UE-Jf=0^1y|hXSEXvs!0`yB7!!n} zY_(mPk$*NF7D=6cQFKROaTVjDK^gU)u=g~Q;>ttumS28t*Tiksxf%C1)|<=h9zb%A|Hz$a3i~%Dz3J0|x}w3pk3T^kgmQ0Y z_H)_z?(x%ZH2?Q`Iny=8UJ2t=yV{{G6PYx+`9)IHljtl|C`{_~&vjg8QuQ9x4#rOV zlmnC9=7U2|$J};1g)~}vn5SAJ*^Sl$)xeuYSltAuQt=HAO~m(;MRg{-^eMeZBgtQM zB5j}Z#CA_26yuLTxp$Gru)T2$ZPLF@Ij<~ii+!C|y=TebzERuPuha|k%&Ab;=l6tIN6Va${J4*cLxyJf=E-uIy|`d_GT z;Dh`BLfct5{$FipVPa?g?`nIG)_tH*R{>_EZu>M`uTu6fk|n zViikF4+E1r`^w7K4*xen&#(Pc8h!698gB)P?#JSZsvE9ZSKQhc#f#g}3*S0!ZIG^; zEmb9y>(PLe_D=5XQLinvSLbfn#cbiSUJ-+om!fJ9E*SUt6)l%3_fP9}?EAW!U+?ai zL${Z?Up~BzJ_}b}%UEZR9p1K;$8F8@Ue0KzmCL-aVARjSW(>|f3kD?h09!+u4}!Y~ z&eU$C1C7+>H~nv*D@w+W*V6!ULG^JPdc$?lw)E0@UxI$URphT8x*83MRvW^FPZ6!S z*Yzj3uJ2MqwsVROJC51K)vJc>#jUos3PO@AH=3-mm&Zj^YoRgsqVt!sa&oseSJ(6a zp6E*gy{K1 zBQd8~e8Z9pvWa8;Il*EYVCR4y46LjWFWdZMg0i0hARuV97S-*njU*c!sr;kKo7_rX zAD&Bq5g3!h27mVGZ7;nX!a_mlApkp@+&`fNgv+HV>xM=x| z`BJs4B15fEie%fU0y>xJ&!z1HIkB`OanPbu{~Ao>+g%hISlq(knN?LrCZj{2@;GvK zo>M-~2sQ0j_2xo``I=sgD@ddQPSSnH>p2?b3z2<-kl){HgZg%QOXuWJ%~H91ofrod z!h((d^n==~I8flnKOmu@>3Xe&C0{O9+#LR%`jLJLQ26rFVL>=0k_;#Eg#oBeGF1O? zKy%Dfv$qF>79{c~0#Y_lx^U%)0ALs8Z8nfLFjc#X-g8K^tWR-1<6JD%AXaf{=&&L4)RVV`>CP|KuG z^$jVp_ZIyE1rT4t3+hS-$>s#)=mQf8<`^Y0F2Hj_c=ejvI9Z)7pEVkOxzkjEjn%_3 zrNpQ&3o#|EjgLI0N)X=7Lq>-Y2~kvY*L+0! 
z-Mmz*uJWvdBp!Ibg*D=h4#-2ra4lD?(Jo684?rUom5pw#xXo7}LC=jWIJViaVXr+U z>SS=;EU~_lO|P|kQaFP{NMs%26j)Ad?jo-070CQhd5t2Ok$N=X?fS7&i93iz`Q@Av zXBSTq#1{m$UoxT-qPyreKgI?5NtMjna7$Z@P+XEjRw0#UdLyG5{9aE;H=5PWkTCup zGYrniRhp74&^&u_{6i{F%Pv5z<0wt+cf~;Ua3Ggf58X%iC#%7HM5WNyK$pO=vH;?p-?>>5`J*CO!P5@`W?kuA@=GB#>tI1tUODaM9eAyjwoIchfI-& z*?mOn2P6MF$*|-^2VqVXkd92#G7~x?loqP`xY3YIU>8++*l8Yg;*OP0Vo2FT3bHMszrGu;LM6|H?zNOOrR{a*qHF*e#!QMQm&}G2G_;Xl9QNfioTCr9XaW`$)^RNO{ZA5+|5vlm-Yy)S ze(J$)$^UTt!w?4#{^-uUFQ}LMH@V}FXBpowZg9BGT%k#f%6za+fm-0$3|)Mgb@pfy z7kX)zw*a?04&Bte&fqr~)Cze!!&6pD+csqf8DStQn_!gvY3p0y&lS?GaLN~nz+ zD0!(^kkIl{1}hPi9DNkAJc9hdNc#(0cLd9=eZVwyVBB z@dbEhd-ZGy+ymcWBM|=%=mvP>H$?$3ldW$2u8MlU7R89_kw?UCZ+-oWi{p>bb}DWoPr#dy)S z*11#gKvoB#GSw|RlS8=hInuZrC&y7zta+Du+z2w*dFGt1v7hib!?!b()ovr)58hO0 z+#Z5^Je@B44&=8U0^LX@#ayw(*bM#W32IDy+t?Q%fAshFgYRvtIeg%W@2b&b9a4P- z#Zk|fzv%cfb3)WOJ3Bt1hPLxzO_}Xg6=K1>gNXLCjIElmS)7>8^FdTnjE(Sh62TpN zDwv>I!PWhV*e0QjF+C$7lrwMo1*kyRBay#QA~(00XVO%-=o9F=m~!q@MYpU3nxN?% zd4yf3pcM*xDzGD1Y2p+-@v+>kKMAHBbz|&U9O#!c49ikwl*_7kPNdwH=UZ+q_4~gu zDjK=lDv5;CuWE)F&Q>ap6Xp@YMcI6RsKtinp6a;E)(Lb^#t$Lz-loF9DZL__29rpg znL^;2*bNOSTWNAbj)JnFd=~X+$tKl(GTAf1bzQ3R9zo2Flgmj!t_7uOY>O!RB1p(x z>vVhrsthIUZl~yq15_Mn?hF8Ir24p15MepVYkW*v;SL)M5&Uw|eHJYUoSgB7tIUZJ zUOk(16Fs(p(WV#_w!v`w*`f2rY8a_aH(PB)g59$VjUzmHakoO3lFE@wHK%+TyXAF5br++JeT08{Q=$eOe%wO^bKF1t{I{UC**+ztzdzyOnFBO}3x9-HLv5}mWz&Fu_X9u}Ti2O4)EO$+u>WF=AK+zt8lxbVWPLpaL)lLdY5W=^IWFJz7#o6e=5gctLud~D^ z0~gnn23d5-UC~u^;zicy`bowz?vjWKGM!d4$(=kL?B2;?OrGQ66Fr-ge#!*b1P(<1 zO$Ocfnhz>5ANsY+Cc^fR-Tn`W1)ua&3_OGE(xd6FYUEq=E zI~21W6d^!#K$5odBudI*hLd4r_VX;s?&UvDO*1_n2b|K10{75u2`2B+X^&mksG=bJ z;|&v0`c?v_EartgVR(EXOK3PKu&m^#74W)&HoE7)uZWLhRttVDsKyzRVVABUhi0h3 zbOy>n3LQJU^?>8ba=4BzJReoClek`n*)P28nWh}%qy1N zwDks{&0p{Tn$BVr2^Kl?f0tabNOM!MzLoOd$8JId6dT={j*QlcbPK@I?%nyRr$8}e zlz(iEGZDyGUG5=!nTEvxwt5YrK2&^pDqhZ6qejE{HPp|bpsg9vA?bc*2HgitpVcMl ztNiX-a)1;?B?)3eQs{aC3tgY|%YK%kK(1xbtlqBl7|&t#FB}_`W5fG6-eNs6C!jm} zI(oNFfU)`lJ%bO-|I}WLXEbM6GQVUQ 
zfw*=8MSZHP9_mwOPxUodOeFT7=&i;~orQ^NDm|f?AK+%NzdD`O<50X?&oVP)`CmS$X64#4wjbP`lNC4j5z_3mgO2?6@=o&5 z_xZe@B86@zKC@B(r9EAxOIJGBZlCc}LEY3_m;cng64_$Q^m_pWrIzV#)}18JazzwT zAOZ>Qf&1D26K;ISovThrQQurXdZs{|srF7f4HYj|P2F7jydtB2jD$_~`9A3)#tf%w zIbb<@XzN$2Ry4BPygY?WH)-i9iZ^jLztryPj}u{6HC()|*EO9Ane=jo=GAE4xh=b$ zcSW(zZ!8Z9@DACyS+}v+4@*W?m(s7KU~l4zd!EV6*YCux&#I@UbWp|Hl&~__GmTW& z4|ntE>kOzURRKfS3B7RN2_&;Y26nkR3|&OparIkB19-tQoN7xy1eS$ z!!rcA%U090n#d;VLAy&mP16Q&x~`O&g>~b(^%m{A{P4EaRsOuzZp5}dYuVWE+bH`v zKkj@AzB|fi-`eX(T5G@gLhk*&*A38{fs`Wx&FGP7^EtSJYjHulzh*8Dkc+bMJpbvSc`BfPal z9~Xt_EcVA{gnHrHRUv8s|dP3Z|Z)X z4N%(eaL*W^TWQ}b|CZ~%_=si+dJ+}B{7YiiMKk={D{jur;_>$T3y@#~uBdJe#Z*`c znGDRmXodB(UyIKTCWh9{vUNK@h?c{`4NfyqNKcfk0tVj8V}{78BLnqKA!txb+yH_L zLXnveN4G2sanz{T&o=jyP*BNEDSW`(8lz{pw8qwHPHoh|{Cg+0&9h zDEBDTBbAU&-TFp!Wo`Okc>(8v5p`?|4#^mf0E%D+1@e6)(9PYXw+CFUU43fT?q<0&-$tY8wU`HUwp%2;87aiq#DTZQ|oF-P^Wco=a) zr%#G%J3&^r>m9xv>Hc(5hu$8yP}PYK7MfC`E=b$&Y5t9LMEzDA_{~O=m^fKesba3t z%I11VR(|6mBoyHEAjD6gNv;+@t<5p`V|V;kMG**tW!Ru(HNL2n_42?JqnHK%^c2D7;|K2EnrbT#L z5{kNK*hjF1gn8ILB0uL)8L|%p<^rGIM>bIXWKp?!N6G|%t&9EjE8GM`>9D-3pyow| zFuW@Z?(n?i!l~1Ok!1UAxvbLVUHuUtc4z|pA_#xw&6+$Ye9AeU^4U-`Zn532j3JO4 z9pwP6+8=bhu(e5;oQpXTOZ z_;A>~w)|QcQzg0ztF*fucmljE8G;#l5b$roz61cTau{T2j}L*c5N`?nS!csOg!xBw z=!$&}2M@QtKip2`-4SwQ`vblWAMYm3FBgu{tAY)KsEFwZ8WzXJYZ6wlw4H@#oJCYr zUh|e?NY`J#_98JLPoRhlx-HPwTHoLbSQ)&gK^obQE&)|iDiyB%*(qE|vWmB^l4w4| z8iRj6=8Z`L|J?96oTb}?2e&dh$N0L^db?jtDGy_|m-n~;!>1IoTa_9Jr`1C)R~e*# z;Xg0E0=HvS;R`It75)@gti0GM;6G^N{*RjG5W?^uWr z2?Kz=J6-3k^om!quX*MHL1@`sXuc{^>xF*o^eBX}es}_@y7jInS;J|f9{)wQNXh(d)NO_cQMyU<|gJ*cic??KQt54%meV3)l3br z;X2GImJ}R?NzyC#`^1FgmHhOu)DjIc>O5&lp5EfTtQ$7CeQbuq6m%9htN|v-=jxDU zY_#pnTZ_#3BY0ezel?q*=;&QmiAebVrxVbOH9I6kgHmw~!0 z<%h4~IYj?w$vp6ao}zco|H)p;z!x;jDge3%<{W*3b{)DDRisC%pDdglID((tyQ^{V z!__-6P{qYLDn5hww1Va>)}~yU6V^&2 zx*k|0hMXvPvX3TS9Jpl3kWJ`0VpTR0e<06=sZV9mAfswjATST?6S{3+AXp|48K0+~ zX+MG9PPcOr$}zf4w+RwzX^j~wQ@3%AQtFQCcK>@Y68t{6f1>27eGDRj(53P8)3-$>ofLIPB-=F!oT6vGuL2Q7$Ml|~FjL>txZ^1l=>;gh` 
zZX7ec!WIW09wSI#Kc3?y^2Dgstm4V=*hEEt(eR~K6YME8#8T}FI zR|fzsE9VdU-hUqsDT4tp7I#i9nJqX1$qXrpk875W>&PgZcXZ4qS$~jb3c|rpI*-Cb zp&etJ@iwaM9Qt7gkOP?r204X#rcoyYt4qSbAM^D@!_WyJW9l6j_w+7Xl7>*>IoG3_ zow!}jd{pI??p`Va4nraW)LIb|3Cjg_5$rvbZV3J+@f0P>Iu5+kC^lF(Irf{jhsTw3 z50+y%te!OZV1=#Y&A{|=!vBxiCB6nte@KN${cHm~8Mj0(o5xCPq&?>(c z|G9;iV}nx`44~@jDvkW$qT=|pIpeW{Aey*PA0WuPl6_2}aBU!9lsME9ef&#BXG;-k z=7of#Pi`3IxadBLJKNp{kz}lqy}shnsX<$Uyu+rg!tUS$9ju1 zLS5{mcl$p@QOKdTFL$x&Tf%x+O#aKt(T2!Gk0iyw)nw8^LxV1n)m?c_A$WrURC}Mq zNu6C4fso#t&6|6we*+*#k0j3?0x2izpax}9!1ogVUIdm}>M+Nrlf z!WBP!6n;A=I|I6I73{EuIkN5aAP#v`^n*`vKeBCU6Wnz6SWuK??fXn6I$uDj?8>y| z_ALbG{+!d+s@yal25#Z`&rJEPGP7%>ct*mM{*8w7e#p)S4?^@6*2pGMr6P>nLpG5` zi4TXtDwW>k17ar*>4icLpuxbo#%K2swO)7Jal$vEj30~i?V(zimCy|CLym-71!S{9Dkec zhxltx>^(6MB>fOCK$T>D~1{oCy^ZV?geFTsDD8Zc%p225X%f|7+X$LUXtp;%zj*`(f) zA!JpZ`)E*E`|(CYk@}r-Z@PZSMCF4%at7wRlH-P?T@`O9Dm91`*glXg2qw!ZG%+hO zCxEe)Tks;DU-sS1nAx14H!EIEdL#$)d_>anD(feLpp5a*B-!)@i) z$NcR!2 z_nOq0NFf0p+|x`QR=858+-fLO@b_#mt_VHsw4_evzn$*NzkdRZi#U*uR3iqx zM-EB8ibrgKBn0({3J(d3pFgU!y>qaqSp*)#BX8N+||gssIln2X8X101t+N+ z<`lICr+`tFUGG9BpA}HcBxVlC4{3-Z!hSY*xFg z^Y?yj%7PJLXC$^0f48(nC0mCUDE{;amS@&0TS+pv#8bt7Yc(30CvD{Z_2KIG8E#*+ z_3N@(yh293xJm1vt~c=aW}73cq%&|-^p%2omQtQCHj30&i zx}$2E;4>MieQsK7rE1&SMfR_;vEAs2nk}_1PRfQE5rJg+$5V}1y+E#ap~oiD2xn%w}+(`&04Qxv|c2C zND8FXJ?-Q~folGw1Sm(*-yTklMgVRMC-E4Ux?03=(O)EkP-j#v|Fc~H_6cZZ3|qH;qM9~7H=BGnWQucld-`kCg=@@xpi0tmb###=)T{5^QN-h#-0 z4D2f|Ji)B}r$3j&J>+3GkLy4A))bAQ+b)VQi3DU`!x5L5i@34sEdfIHPOud^M`VL8 zo7?UO+CQZdm#bo0_9wEf(~I?8({RQF5}8P1sA7Mo9Me$PB@CkFliNo-VEHK*n0(hR4hyz4noJ__oI6g{~FN zXEE~8TuPbyc!v4lFJO`oj`|&RE1Ca>5wg@+3vtT60O78wVfKZQUD~gO$7QKDH}p=4 zjWmmoBrY=lQ3O&DeZ~JEFcITU#gY?Eqm1;&)5Y1>EvlvzeprPlNOP8>5+LQPEZ;nq z64bg*bvOOT&*QE~UC=J5YOX+>`4d-Ju?%+LXi6M@-qiWD z2HCF`d7~Qtnb*2u?`?!BwIyGq0&%<2PuT|N))_)tnX-p_KY*goV8h26io_M{FL8ez z2}kLHUujM)0Wgo6ofT}4*OQbF0Ca)DP7R9$Li!PQIZn~pAT2cGln3Eu8h#^x8(DqT z*7~f^B$h&pNhSr$L~qrf_kEL=>tK)ymA58IuuPN7K-&m^qsRV@;bwvSr)1mL4A5=I 
zG3;}DaB^}1GJD-Li14mF93ESMA0$@;zJgGC4Zu0l@;_*M$0%8veciWh+ugHm+qP}n zwr$&(?e5vOd$w&Gvu*qI`>wU_-fQ1A?m73vsWGZ5BQqi@Gvmpq`u{59iB9ZW9TzP$ zZ$CT%$TRfUA(D{X4>l9jbo6d|#keCKaMDm`p3}~~d4*mmxe11=@L^=LfCvtS32D+l z=yEuOs|eMHAn-A!Kn{S3Mn-hL@lD4L9VUGp0e**^mxMkl1d>CksHZ%@!C)uBO04K0 zbyucpRn~j>0PgX&W4tXdP6*{x@?Ly9yh2jHC9u`1_=EScFb*AOD?5msI;^-yCF&mO z_IFG0buY0nzED^)pZ3MPnrPFN{P)om;5x5$6pP?@#9AsjcDpJ*5$bDsUz!hL8NPm0 z3;qG+tSWjQQ9hn=<_3c0&lWnoz9p2?PCk%YuI&M=OkZ#$B;bJD6{>3qf3#(*8Ui|J8a(^9Rdw-IW0CKju;f?v`JL3U55farkDuL z$ePv)#1j{be;7%^6GvF(5)GR^c>kca;ufrIkV(7R*e>=R>|ag^W#L($A>@{ZuHu^? zPL2#px#e}njSr42y>G)Ig@shZvM-;Fu&%9PHe^&p7CUycaX@I<{J9>FN{%$llf9Xz z6XfCY5rJIEjKFb1oTd9mXA2h)NejZVGYWyZK4Vr00-ArWY#@KASje9p%@cw}h*X@A zD9#$VQ4nUA68a|4d^P3;n9>X-Gg3g3qmE?7QtN?Cid{Tw3sv**xR`4CFmmPvioaH?D8H6CB#56)^?617{lUGPf zxSTr7C_^NH1q4LmUCgJ8@$pm$7zr-mNumGigTQNA5L zw%QyIhV)lZyx{1j^ni3TLFjleH)dC?h?Ox;uEkf4^(C(-fhKWwvDkz@TnhH-NL^f= zcA@?1AXFjnvmc1Zl}ezOOb8Zzr$B+4!ZG0cAmaMbl`q>Z<|LbjZ*v7B0EGHqrF+5j zIYu26cN;2G*3a|e&67#jJU-?X#*5Va1yLfewN3WI05p~L>c#6ro?37lm6pQPkMSiA zMDL|dUx$=}sPq$Akj$QPfOb2rR$8}hGCD#n@&V9o%O-9pKdGgADU%zhS_&LcN9NFk zh({R0sZL`Q1oVQk0~VX+)9+L0=OXA%z57gkB7vzJ+wYd(YzjRm2v&mG710nMHN zD4UEjKGEoef^B%wDRFe9}K`^%d9iceqQ=f(T6-}zA^WoVb{?7?O4X$AVj zCrW5Kg&>D?H`%dS6pau_0J;h{XI8#i00jWK@Sb8Mznw=qLGOsq;LYWNGaqP4IOKZN zF&U&~3>kkyfbdMV%q*QiWSM*^xgv6;U~xWTHXp!-sFP0vA0!432tq9z0DS*&J8(Hp zSb(C;`HBBY1yKnV;4aJ%`#JIVC@(Z4p)fXjd-*&p=rOH(;?OM){m)Dwo>#XfotL&| zF8QDjNaBPIZxt)pdHe2RcH{iLh8F~%W{K0H4BZ8E2JI@42djz&bY}qybP8Z&Kmk+F2>o*= z2=gGBA6+wQ2v6$KCXQ%Tq2!AvKF2GVLHjpa(1qZUN$o_EUfCK;pSg2V+-{C91jBba zadNxhty*3b6*Yjk3mCF22bixQ4(-bd^u;b83;QzVJ{B;9u?#s_MND`LP_@Sz9nJOa zGLh#=O(HR zdGl?U&9{f`k|QC1{Sq)?=fp-Y3tm$A39vd1|p`od9f}r>&B4+Xjl6m}LK@ z=bWNr{hX6#Y)H*lWP%`Dnike|S^jaH2Pv_B&hYWUb`i~}zOWi|LEswLHcrapz!AK* zsAs(4T-LVqj{g`%2^iCZ$Lt3%E6Wce`GshAkCEiU8VgCXsy5Kj5l^NsBmM?V_WP;A zvZp+{+g%bW`}FSEoW?^Ux8X)q<2hZN^Kt5edk;hdbr1l6sKTAlTY6*%^A&QVLLPnC zS^-1Ya{N1p*qCkyk==L^N7w*A&ur#;@w5nda!zkMYx-mHvCpizWQsE8-^_;>>-ue3 
zH&Y{P8lj5w^ij>iV%>^2d47JcSG+R%*ouIxkp=>@qTbYsffhjO4Q!ZC79c~0u*f;` z*rZyF!>ZQeD8ow8JU{1GI;7&nkHj4T@(0oC?#01T*Hf z{7ULzHQP?+qmF*88#7d4?`N>gX6G4X-NLLV$C+lGCs-@x8fD$qeZPIL)Lu7^GtPQv zTP^wgzJtOq%I5S-Z9UoFD#xL|XvjhqToyMEm7|9;mcx8Q`gchzPHWI9q=y9Hd(_=U zvKlhk3n`g*)RU9kcY{)O_X;=6P6Wz3S}nA?r! zkv8?SPN$^4f<3Ckml^uIf~rpxy^4agfBlV{=*WZp_QdnL(?z3zJU_Pv;(>wdGZ-F_7!gFB<``P@K-P z5fEHmuDdjn+#44HVM+*siXr&1qtEv2<~Ea* zSE-m4|FpIaFUMc+`4kC`w`_>uw9_KLshGn0b{!-r3lZmG!G5fAmpsNAp}eg>UJ`rH z;sqLob)1C}b$FRPE<2_j#$eYwl`igTWigoh_~D*JdqoFM=_U7_X83FO^cZ7z16aOt zmFcDBJz;Jc)Mi^;!E%-ynu9*K%ho;H*?_jCdWdRvIS5rgYKDCXap5fdgokzDG$J;J z+(|+q%k)eyTPu9~0+1gN>Hbgg>wov*`iFmt`nQ3KiIFoDosx^8^WTmtqIQnoeky;x z{+)vXicVfxM4k5Aqsv6rz{Z4toR-es!pMov%*58j(ZYz<-q@7lpLygB%uHlmYz)7> zy#7^8!q(JI*uuz}fRXJl^A<%YIzd4@cLFVD1{PXY4t4?tW)>#e@3@+cjhU96z^tWr6sGY6z_rkYF7z5jPU4I>j;$I2&@5EnG`QHf+0!DU* zzZ2i?VRZ72c1B7j&IDQnbl-kt1a!(K?#=|?760>)|E^ZThT*%+KlY6G`|{2`H1P5ksq33Ud8`7j26P>V)g|njvoq)Z) zuz|AyowS{if%SK5+St=+lRMkHYf~uOtN(Wy{+H(1|N8&`TXTQc{O{`gkMjE7IeG#{ zj=v5=@h=Cu|I*N368c*~bP_h-Di`{99erp0EBF7nhW`Bu`_AxRhIZ;)iHEE(6E09W z0Dr!m0PQXv{x~+$dIdcpk#%9Ro9OGj!PYuhi{EMxQJ*FrH`|tg9kL> zyDw*;lW8$WTxj3|OW85k&ilY3Y0StgW55KlX7qbgcetM&cqOWL&ar^gBc15$+xRVr z7X>G;93Tch)H^BU$9wJi->&#NO2`bQ5?A~U(^!>jjZS$M;|`raE|v16a&gu9B!dd7 zvf0R8whBCOy>6GlA-+ctLDfT0Hd-%vMGmUfmIEF^DtGt}`?ji^z=9z)X=pC%TNqTa zDKo*;7FF(aHMaq6osy^hDCr9?jzJo+hVY(c2WdoTBkBVg`o&4fU!B9?4_$4P1q%$n zu(ZdoUq3YBpJ;Jor8O=sDzP+19(ip|jT*)f@%&MN*S+uw(sX1g3+IRf1br}p;9kQ) z2F(D^a}7Z$+Z2f!M~29NHevvQ!}N1DwX6d!Qg{*aggKTIGgo8P3Ij-4KOj3JdM`Od z;c#bFm@-yK_yD7$m&V&-uhTgGpenc!T#i)I0iz*u`Qg3z$?TX%&6#3nPF|g$RP`DU zHj^0$r5svOkqb(ly7Qn*2~#THD_=_cI`LN*tX;x}*siP#GSMg7(<+Qfu>-w)1gN7e zV`KxGs7kOIauYk;Y=Y4_lGFQI>VdsQ5&PNHNaTohRmuRviM!6cUZh{_Ua9KPx^#=b zCT4)8u3+$<+cB!t7%B2_0Tt;{%Co1H`_b1Q(ndv8U@;8;7-Ax!$J3kGqq+CZJ)gH; zhPQWg|e~v{SWfTzO zprL}X>_|HnY~ap0tqOro%bw3uL$uHXuJF6hOWlYZ#sVS404uMNG zTNK1b=z>?2ibb$L!^o*u>t;bCX+Icvudu zDj(N9&ID#(haUg#|Mb|qyRgK)Jo@~!HRtY`jV0si>H&QMe`bC@6^}R2;bRtI!L!Y` 
zL3Uwjy!|*=A~)$aYgfFcz4P#NZkjq_;N`sggLWRC#`bv(j-<*v#j_Lohq6hlv1Xws zuDbITZEa_{d8SVM08fxZSt2tx(|;1uuu^i$ZU=X9SaLI;w;D)mbgB&1Kp3xdvzpfZ z)%FhVDTZXRwbau<>Ye)m#q5?iBU4Xc(rM4GG7sjMf~lKy zhl}ha49puqku)spXRNGg+O{DnC`%p%Q6?ZN z?M(?c^tx(p&Ik2yv1%HT=|ILc#Hd0m@f&MY*_>(KL%X-CH>8)`$~-RiNmCJabHJx+ ze*L!p%jPxThi!<$f9R(}gW`Ci9j88u3G^{nTE=r~lw#CN}^VXK)aXMJ{ixqH=>@zk~J7Tx4>9mQnxK;!AT z-)~Kqw-LGAwxWnR&cHSk67}C-Pv=~NcyjF{T8DSeNKgV>#|hoRVM-x@ zb{rnm@js9QHU0o&Re@)gv6sY-|IAaGKA1H6nUr4Ad!HYa&k}sR}U(cF>8j~dZOb*kT zlCyTweSwd2KEhgfg(qlNI0VSiEaEvCG&d2QXG-VP_Hyn^Eg>S3UkUQ&)!hRa5rZhQ zm~4Dzs4LZ&g&Ampf~5TZDp#Ai7v!226ZAsDIpu}F-LF1y1Un;eHn=kPeHtN0I$RuF zFl4sAWF@uf%*9IV`lj7$uPs+J%e$ePmlie-uN_>-moQ?gCF@LQU+cAocu7BsQLCd= z%mx(MU!j6D%b;o?Q{3Vv5OZXU#9XYmSr-b-dV#r;Y_6;RwnG@=0WxaVd0Aug*@ZFs zr)bETbGObF>miY~6mWmC)3){|2j(FuQLLOOfK+s5p|2;9by2K?L_D*=J#e^SC4H6T z46G!FL6DwjjYCIz;|>!y|0lL|wyzd)9Hvn-*QsI1rQ{3M(DM)f-kkYip92nvllfbV zWpYj;C1(s=-u%1QvSz=GM#NJ}Dt)YdMPU74Tvs&@4w=qd@{Vp@zkzJ-UfrkYcOpYG z#l)ZY;Ta0Gl$7*LlW1=2Wx1nuOy(3J%|#_TxzCJt+Wz#!(QcD2X)(6-Qf5Fhg6U8} z(M(qQ^-{(h@>cQh6*P-AFa58GToAyo6l#mBtR?`k}P3~C}2 zmCl7?4MIWfHw*!Ppa#goCLp0I{Ne4KQGOKcIATK(FVOzchke|_z(|>&_gFS$S8aA( z=VGQGUR_=8ei5Qo2_1rO*HO7K=%IR4MyF3Ec!i%_=(^9VV|Jw#X<&!XX(wV2hzJGW zm6BCiSbd~E^E2XflSO(|CAnEesiRc=QzR&gQj~ZMM0St@71_De0;g#^BI9xP%9f<+ zG06l51^;7SF;EA(yhvs1G9RZ@p}oehbe=q|pk?bdhW>ugwaa#MKWhy-)DF1VpuS7N zBZ9h(IJCYuhK61sddcX^Wd)o-`VVh%ZF@!6ul}G~PcvK>P-o3wGb#;8E{?nc2MY+u zz9rcr`?C$>s*5r6%u}6>zIltfZXQtCwCjxXUyi(A$ z+X0&Pq4$DX@Rn5=0#K!F&e?-@QB$ub-D|7Jv_~|4-sFx)n9q(U?dDiJhn?D}8mmY> zbmgw2ge^m=H^B(t?@>2f>HvvjqWAP~x;sF03QBlD5jJ|HakhFA54AowBvJ)g7}Pq2 zFGm-1AL*X4A-*9T{*d?#N@fnfCnAs*upkVy8&rG)0QlK^G2$`{eqfP^rq56G)sh-? 
zlP;R>(c*ya^xjGmN?$Q|dAU(zGT{#u#Y*k8s2fRK)W^2?b&JAMwX!a~!Dp(fn@HYn zdHTe0qzIq{FvJi9X2hG5AL^LRPeh)Ytt5=>rqf;lw}bE?6^0HF2vsIBg6@hy^pE?1 zHvb6)W0t)siw5WR4GgPx08RaY9Sj$Pu&WQ=q_!3Hm@5oKz9!j}iSX0qQdA3s^ax;W zuX_fKX$I|XHkYL-NTgN*=ME!)AT@u_v3?WYlew^t>?>uUM2PA!xUApenYGj~1yfke?{!%@_V`1N+i)6D=5zTD)^%|hDs z%?dSsT<-BcPhY*Dk2(q5`hGJ3o2Ao6{UlbTR-!{qCsEyrsDk-1-3NIdnA_QU56 z3ilZfhGl4;h^ta&!bfxCOifltYT3#y_)o{j-V(<|Py~0NnR@3w7~!-;nkhDTUtfOX zw7kvyCE%Bo^=Swb?t~eulHY(RQi*pxF8g--fwbuK zxQefp9L5!FZesvF+X?w-r%PLSXCsAzn@^6)s%K8krcSz4-L#_Kku_C`jiU&RioCm5 zUFBL3j*gkku2Pcy84`&7?pfIdI;(~fYRe@D$lDBZX5$eh$!}i20EKJ?$8aY@*?BZM zjDB>dti4VtuHUBk7Zm#EyT@&yeiRnT%Mkbt%@7&REa&R|1ZHY&a$JFIrSmg$ZBJGb2dd#{0iN7WdI8Pwr`Gd2G4f^aAvNVS z+#toknSIWsgU7OV)sl*bEhr{M$YSfU9$JI7q zg@cE$s_or4<)C`+6y|2QP8!TViAAEfUt~{h!#ojgqPlUyZ_Y-=+k^)O@pAS-rXP=MW4am$^9Ur-=)jt z=PJlrK}gN4fqES=u#-Q`m-O>lSbR2vYAJJ_u@elNympEP3P~ER`B4HHXAmdrNkX39 zpSQ?OSTb20&Of2N8wh5KcFspXr9QPPadmRTe@)ZG12TkMh`fY5To*cA&#@2Y4+4oP zTryf3?(Zw&PhO2){*M$62^M!>+t#6tTGFFDxQX&KpA|3aV4v>gAR=#!Oz ziT-am^*07(qW>FQ{sV(D(f<|a{e?ll=T%^${~M6fi83*LFaL{=GJkJi_y-@Q6a5YV znHc_6goWWd@t4V`BEi2=um24>W%|ai{~%?5mHJm~$^3snPJI%`ZTtCQM4n!t=nN2z zyoT;p3cKA>>Wg%Qne!z<$nU=vC71$c<|K(ccF&wXFalW+Hqh^hd{MFq0fTCXcTiR^ zM9|>Jo6~ukVc0tM+S1hq&trjWMT~Ozn*v7=X}t7ff|oSPw)7cR<@72`iCv5+nA(kR zX;v22|D=ofQd7o|z#Q~}9Uyo$eE^qk!SwF`@!>!q0YVmy+(7QiM-83hQXU;?=D<61 znMd_6ghd-;mKgc4b6YpbqtciGO(Ul@ak}Z|U9h=Z#i=h~FcMOY6U7ndH*y>lqEim^ zpLMa)45$;wDmbHDpYl?v=cx2P^}%oys5#LQaM7u7W-1eKywSrMM!tfg43JeDGgHPp z#O1UD?}J9vVTd5F3JRU((YqXt6k#}_9TRudOnyL8(vOr4>(bHfHbz!J{^=Yj>SV(5 z;GkI35vYgn9p0jH0}4y>>+%0B{(rR5f8exiEGz^}jDN}P|D4k@u`zM{U*NP%On(dU z|0k#YzI^_vzW;liR-4Q5ycKoQ3$+U<^BEZX`XH4lji~J$vK^UhtNSVsAWP;p;sK?c z6ayEKyO)Q+o-cn(?~`+T)BEeK$L{O-$fx`3j8%?h^UE#k>Un$nv$YMaH1(2q$insM zh^Cc$vFzEP^_haWcTMjmuO-fHVJn3qtV7Pl5w+Zm@38P05Z-b5>(gxUzS9GvxVOk7 zS3C4qq*5-otmWYS(M7dSS_WTivXjMFn^Nq}zR0M;iPG2$>*5T>u)7k+Ma3fso67d; z5p?X0djaQ<&E@jAMyDp}0hE%8)JppyGDmNo%zK|ve&7OODY!;9+XWDbg$*@gZxFDsh2=5 
zY;yR78Or?!go!k*&0K{5nyAmkz#u&!@wI11umyC%P9dUv#ljHAKWXv-I@&ERv5qJ} zyb#=khM{K6u;=D0j-AnneL#OJtY1F*M+GcgWV;FE!wY^0bUUJ3zY%HXTn6x&B%3pew|Bcf0(?XaxtaZ9 zy#9BUl)9#wv(2lqthzcRv!0?#6% zYU1*pDzX5FS3q=_+&5Z-K?7N+)tN3j&C)WqHQ7Sz|Zf{)IhOQFMMWMtaC;$dlM#AeWcyX%q9AF?Zwn*v3eS`;(POXo)S|ubFIA4Xb9pn~}0z zBMZSpKkL`C8I!2ypuDfVGQH}o_z=p6F>hCUN5I^)p!b#Fy%zwSN@lENk3KVsyfdj_ z$b4ZMgnc-vzKr`wSu2Zdlk*&~s}2>8%XpA(2UPXdoS?9iHmUK*E0U?ygM_MphYAs9 z^8NftZ6l`!s;*3eDnk-J3T(78O5zU@o($QVi%+;`jZi!7(5G2%m!z=jbtEu`9bGKr zGnpHqy1ey;nH+Jy-@-ZMYJKEKGMeGDCa0BiAx?GSEWrp!pjRA4rEv5o!Ki2c#QJ7V z(LA)?K{5Nk;0 z@}FXZ3c)4Y1=K6c0}nU>?8pQQQzjbbTMUS%%Yno&E;=jrrTq}dW_6P#+a>LEF}lVS}hUzmy1qZ`y}g#5GWnxOmo8b}c#JsTxqQ<-tM`|DThayj3YQjjYy6UZBR z5--I{GqfAN+jJJXAX{$m1p5&N>;7XMWcw7NU+uAGt?05!(04QZ@iF;PUNA-M))-0b9_X_bkkjl8WXuGwQU=_-+@fabn zY*sWU!JntbaKUe%amb*x&T~>pW?E zL!V~4_0S6hm3zk}7dS;+M zfHc+SbA@s)sJl{shAUW+{#M@?_Kn%9U>z|h`DTDFZ4YV3O^pKgn#%xx{n^ow7$%MTP^`$c6fAB~%5?m_*eA^K+eL`c(>6_w4`Z zqEe!d>`nYq546&LuESyq@?^$}ZyV5~Ag0iY2g5MfUlI#amlH#z zE&W18BIsC~-0>-N52u>S&cn!5qZCw1E;<=Foa?Zz^V{?z(7bf#6OhdJ=Retf0dwbq{O zXBiJ5Au#zwFXSvZTRc24Aylh)mo5?`qyEz#gnaYSW~dW(?@-v_J<^m;y+hb*NRali zETq7M+t8-~>G&CQekNFlqU4dq$mLI8ajNuu0kgYIliFm5w9lWAfap@gTt)RUW;5e; zMi=Y;dex$;0wA8Q+G@oEJ*X0)IP{9H>}SS@^=v+_DhCNq=tq^R4+v>*611|~e11o* z5arBuplod|v5`=EAbn5#x$ynKam6^ix`9C{q2H9JGk}6pt&-N$F*23pv-rw z={FpvM<}XbOI01W%c#Mxc?Lo$t7@@~5gehTs+hql@sRX0 z9*n1cc+T7ftyanf2!o(?>Tqz7*BVgVvJxTAA=#H`BZwbmF2QO5@AwT3XYm1;;Us~t zd8_(oLxvD2Il=#^u_-+L_X04rR8G@?;Q>cgjJll=b-I9Lk5-hmQaDUIe>BS> z(Tllg6Zaz4&Zo)lZfVWg9E$wy*C`w8Kp#M{T&hNLr{d|Cw#FRtTfa{Ei^0vba=%r6 zUmZ5s1JZ0z8r*Iy-Z^R{oznJxbU~{MWH8^HS2b7^^ahiB4~MX{aP{0LoacGmZ&&WT zG!2Z!x&ZVtztvv#XHC=J{I;HRUPAcsB;1|CrW@pdejnezqzrbJ%eS%Fg}09Fk45Kc z@wkA8NsqCxNg#XqL#%P&lsF(Tpg$reBQ2NDbc%iL2&i-7a9upTcsN;isR@^m9Bxix z(dyWHL>nYauf%GU@G;31imLNBse69q>j)NA#nKh)6lo~8;|Jn!hl>1RVmtc&II@m} zDBRO{r^297oO(l--nFk_p`~A^@hOtH^@caSYac5=q~x4N1dZV|A{GIu?jXae7QVM? 
zesBHG+~YPtYtHbsP-OE;}UI>29$vVFq)b);*R2)e|+exl5r zZ`ZWZu`B_YCrql)Yq;t_ov1W>FWOkXGSEQ3Uy`rreIsX>_Z4(T5f3G2X@QT`e#G4_ zq1l%oP(0%bPm8LXRpE~5zmxb&3jxU7DD@!4M?v~R@%?ZbONaIpHU2rfV;|-({ReQ> z=$=yy%p7`BZ*7F$Bix2na7hRZx!4`6E3$)QpJ4iqb>hImH^Won5%28n#^vjF!Z9GD zot9${-aRJrrl5bIPgJhfP1h&kx(oNTHOG6QuO8Ajy#g{WWVX5cP(mu^NHL#JWXwHn z28}uXeAO-Benzwe1$Zz!NQ_x^p)@tz{#?rw^*YjC`R>(JbY*{WTKQBX z;A1SP!b4P74G+wp*u?t(}49kJ_=NBF z39TwUu$xHp=R)<(&J+i*PomyC%XQqc>x?JyDvdv<$tsTZ*Yw{4iDl} zS76w3%!%MSI*?l*-CT!yJGhTIo{RMic{ct?QPF*YEeb_j=iuxV#Pc0=?8N_GZ|{ml zkteIg`at74vU&_;a-VSceEg1e=9NY1qi9eCW3agF_h*4cKbU9V6^>~;hS*-e+O?K! zmFWcNWVmlTi-r?%L5awuT~ennM#$J45Q~~yczp}9hlc_e$NW*cTlmcZL6YP(GwMqM zEeJj&fY-Ioj!R8GN32F*J8(Ok^nD}$)T}IaLlCz|k_G)nt^70aCV>5>t3n&1fFK6cxWO)a+b7CT=?X2rz=|!o@ zYWdamq5?lGRB&y!Pv-YJQrlQ~AN^G?N z{MgoT^59&6{x|?|uC4@>u>5ngqw9lfb7Qdl@P^OWAm*zXnOf^w9RQ6u7dSjs0;&K% z;NjsJ;MLqz(cKW2_+(D?>3b)kU}|fdfq{urRGbf!jREc%B{KmqYk^k=7vS>uY)#Ez zfymW^df|TQgZYOPLHAzffdDu<)33 zZa<>mUnUab?4B?603I&@)Y<&L0P#s_xV_VZhhKi9pluzk{ zNZQ5*$E2?uuyu7R4l1?twm)@W)a;L2(Zt^7>S`%hftT5*!SQ#fcM!?TWNv;_ytk+s z0njqkx#D_Z6CbS8O-&ovF9v>+EeUjAq_{_?AwL^yu$ipRCuG1g_8qy@>j(i)Kzy)f zKTff5jyvbFWbzu)Vl(V~2E>FTO6zDlddbd7aj3+LT% z$AMV1r9NR*6k6-*x*r4$nEEQq@5ibtK!Z_vE1bj0bouwE29f(AMa_4f(GGwOK|JE1 z?_@EQ5Vy#ST5}}q3wn$WDkDl~6ib>8v?Lj;d)W)2zCC>0>wr(>jRs=)CaXJM+7T}# z2%J0Iw}8po%-J~R>PQp8qYzvM==faad0ffAHm|dmisor+SUCsp?kiDUTK}PsH(h(F zK^|zZ7c5l<@i1X?aFSxu3!Zwp&I4QarJ9cv(3vNVG2sNp+}l8<^7ZvG;dWKcIP*3^ zC6|P(jLltdm|DS#tma#3cdX()p#SM{h%Ky!h&QDWfs8Rgjizi1 zV~RJrwHV@koy3Bz9X(}YErk-AMd)OSCP;CwuU7k5Q{1kulP;|9>S)+l% zO@cs-qQtL;OasBPc(+W^q4_1e5;oY8+UehS%GU4W)ckR z-3va*(_Gb!V;F*A1HEYU-kqV$CoR@vo4@#u&?Fr+rr~C8lixFlgOtw!$!_}Y9%T!w zf^Itl6IlGsmo|jN#RHbBuq`{{0W3W%_S=$>yi3m(_e*xtg8~_WN9LGifo1Nr^uECp zsgr#`h7{VGRW7^NyVTi364e3?8dHj$IG3qkWpV22%Yi!xKF1>3s?aa!9;ASPazUG+ zknlKFHq+@uO3S+!?+ndThYbBUIC#;IIm{^H+^F;U_K7%f+7!lN$BrHh`%DzLhkeyk zV9y@E%G1>E`F)1R_7r&d`mjiB-kcLc4Ci?g6m+pyqws9K)1>Myy|Qp@%6e6_4dX+g zPN4BUv4DNNMgh`t^Y7_8I}I*dyG*pYLDR06N_~o@1%i2O(ukPyl|EDz=a2Y#2c|sC 
ze4*^zJtMaY$L@()3S}_OOoV$Q9-!wGJXHqFP{|_aYNJmMmL4JXyb~0<>xaR8F1(Rx z4St&(EPzI1epodK@Ju1hzH@D9Gt|8GxHhSY=4&2SJ~~NW*$rSaW7<&pyT4A5z&Hgi3k3h`i})?KoXHD~uYt7>hk@d^Ff`Z}v`m zlh6_f`rI+_+}eQ7kgk>(Egf+}{NTG*eL!tsnD5!9^BeF8t-_{wcq84NW~4%B%mR<4 z__)l*1X2vrNI@b+rT<9s)@np@v_g(D1fZpLwy*so*x6zNHRM2j?zwti1ba&^m zNSQrW)k?l=uMiFCp9VPm<$?51gtY*6mSMd}WXW?rOjSXo{imZe&-6+!45HoAdugMC z3o9XgJ6w>Q6Ifu05t!l0BS4D)v!Dd}T3Swx{Fl(<$G>FaerukZH+xnlR$C=jcaR%q z4MSPF_lEi)=fGZ?KQZ{dY|LcYht8URvG`C?t)y>k+7-I-XOrR4_12K0qpA8kimPB9OiqgWoNuW*uaN$9K180GrcgENSQ{LsP;a zQSuR}V8O&6`#wvAYae2Ij_Qg7!-8p?ZhS5w6EGH|yngW-&(p)!CuUlSa65%XoI4g8 zwob>07oKOnzU2)Kr8MG4Q0H+mSIK9sE-Tz=zx{Z%TgCI7!u5`n*ax61pS{w@aNK1G zgK_5-M@bSw#b#lwI*lbQUz28@I9G3+hT)7tk)?s6V5RC%{ikhDO>YWJ@?CU5uAHmf z`QlB~_Hb}T=Dw}|^H9b^jHSmNIIucqgeaW2Qnlsb<9eJEyWV}UnClg7I2g11AOH#hu^r0ca-;$hkHVsICF zZQb(`ACe^rJSFrR4jfr?8REj^2GTxD%pkcs%*U7^^}fT7po}XTZ)0XX!xwt!E$wVj z0jQ&t62{HUyr!_6DAFq^qVX39<;MEC-UZTF2?#kFNyT-D0y`t((Ad=^ddry~=8YEs z-bo9xtG$~^-W;(&kL7{<4%%EBJ=vY+u%FC^ypLfQ%I2Z9sDZVM=BWI|ebRfVN9m6t z6u+(JWp+V|N;7hek(}=0Wm;_pl(on40ieEWP1WKWswK(0_M+*tBaaHEY6vJ4I+K-y z6`~Gz+xq z4kR{}#;BTO#``i6`T2{Q35i0FL}E=DUQ-3fs4|X89%sdQbg(4Q-y}ThT~iq=Oh<@< z5OF$7$u_9ncv+MRo0BusD2b1agjNyw9wZ#9er@YeaF%GLtWt1hUb@Z$*H_Y2;5y!W zGLPmUKg;^kKQ>m=r_hiBt)G2Hr-jWLi@W()F@|=08k4=YI?`~TVx+!wa+4W}RS5iG zr0))1^z9=(#{+eURA!r>jjDSdt&0LID|@evDw=u8LOzyL0Toc4iWN?BiG8riK!Jy~sk=@9lDhPUtp-=!!@iWv7u^;hph**TmcSySycJ&?=*#!VvFt{e4K zyp&c!2xNBrFd{aiK?Q{xFZQ^iw}~VN55`DkSjQpi?#tS4_hH?ZOA1Z!zH)Oe1V|kR z68CIj@#k#FZbjYYrYU!(~2J9Ki4eM|$z9Pt&IkhFFn=WO6o@94Mm(C%Ys= zdolTdz)3_!AXIVAXXQI1x+^aX&)~iBr50bKGSN>mDe;aZO)OlWA&R@c(p4Td%Kn@vmbzQJjwej!qy(ll z5==!V?55kp{~15aUns+ik{r~E2_*S@18d<3V%>fg36>!<*UImES|SpQiX`b4J}bfQ zhLi!(k|82~TQVBd$zVY3U^L&x*M16BWn{XUtK(dMBRKIG^Gt8)(XplWE*N48Pr;nk zoAQ8HsbYK(oe9@zaHqDn*s8J!gH^lJM1BYznda3?5iV zt5Z^HH_PA-;mM<~sr-8u1QE)6k$nqJ3|WFG?*9~)X!jAc?u`B(d54S8x+%^h>U``~ z_vJ58{33qO$aF}L&jg273afb+5V&F|KY4N5%u3u_@0^v z^NORTt@dgMv$xaoXV~3~Dtz)|DsxRWj_|pvQY6=MV(pp#TVKX)rQ{e-pO59HH(ayn 
z2Qau%L0U~1l@&sc&o;w&0qQC^H7yi673te4NB>p*uj8Hdsz4-3bSu0NL?9=?Zi+0O zeo@O8$_S^h#v1XO7yLKVNI5C$ca95aoZ@2Ynk`eNoIZ6vZGU79xOF=^v!aTTKUP=U z4nz~5`htJLx;73PPCyXY!-)JYKP!a8yEGlkNU^2vRjoKeB5xd|%o-P?b$^Bjx1g4+ zsR*>R{#>h`&tqZ1kR}U;>H`eEueh!6sh`kDu z(;*&p!mX#PBrY`7Q&Q3;RxKb~`xDp)?G7D#pGh@Rb>f?9vMs{8a~$=;CEj%*z&@Wq zpN6u-vXi0U+^gj3C|Kgk{9d%~IquI)->-3*Gofu1IYPbRh4erl@0_0Vm7BOnmV<2I znRr9@N7QHts`S`2QHRqJ_#hHPG6{6LQ-=i_gL#gkqK(f;x1TyzNOIWOtQFOq4#kO} zGf@3Tl4Q}!@pLVO{@I;Fc9wz30!MQa)Ex3-h(#E96+>!yFHRhLF0V*_$3TWiPJXo_ z*fxfZ4%O<`I$qO7_?QxebDF+vHn^gbF|lD^ac)LEG4jOOD4l&rLh|@Ck0m(hcsP8Z zI1*yKXJal`glk zJ!Ai%P`|#a_u$L~pYg!Cau-iMsfo5aA~Ren_DucY-b3u4GnDFa)-Uqa_(=y++@u3@ zl?dDXkw2+O&Z6qI;aCCVYX{RLv#j}9KlKBzb*16_?UR)Urcwe{;zFRNa z{bFqEPpL_43*lqixZGu8dGxJm4mLlGzS25wO1yZ@ZF~Y8<@CI&qZ)A~5#j~5=7EOO zK7dV6$0KaGi%|TbB^H2OY#?Y8>ob1Ql(ByyF63nKga(4oDbyl2Q1`zWJBKD=m@qJ| zZQHhO+qP|c-?eSqwr$(CZRg7&IV3rx<~PhtO+DQ`LKCX(QhLG-ffokTTH1v1!<_1{ zCh-GS?7gBrfrS@eAmYvGOOr_YqG;KOK;pJoqmT8iFl86J2;Qx) zFWJs`ULDtXxKfBkgUw%d8?Ht7TD7yEjw)y-EZx}z@#8hw|2p7IjBRgmx32vm8g*WV z1#&%h0%5Vj=T|Y!Xbw_sh)nGp!d8;TTWfL*9~z|z%6JQvc|1?mJs{vAzsT5E$OgOQ zWtV%adeALKRW`y^`Lz_$d_>>K#}=7P-x6!tL8jeqz#KUjr)Xx1H9oB5DAp>6#{CXG zSk2PpR*V>#jVg1(gr2W=(eLiSd~$AeFYxxeyP+SVgBYS+5+nM{yEr8w)pH;si(lg+ zFYj;U1G$6}Fq~r&&Sr+vHj2*dmAmJ36DQZZHX&w4WOyl(3P0@rC-M7_W7YH`5e2@ z3F+Ag6`9~Qg46Gp`epOtdl1Y!y?LH^b>#tzfSE7bT>W3=iP!Rx)C^31AO5Ahy>IFanmOnBfI*`6O{-L5W=Q@ zTu|er@MI0C)m5lsm~(s{^>dnwy;j(s;KZVW><|MNMB-8{bO8_S`B$?fh=*5@_hKys zB^d*(Yiru?s)Z{wS~{Dpf-s2#Kvr)UUa%ET?XCDXe41p5%=+CfUB+!r2w8hX+nx^F zPlp%#K&rBSsuN%+GL;@Uzq6dqa{nYcy7?o*Eq2Hb`x7$#TcPV8Bm< zy^FC(BGi79uGBE?qO5k`S6&f4`Ddw5X4+c`O^$~?(S^)RCL>~gA*#gej1|c7;*x@D zQ`K$CbIW-M6Pm^(@sf0@>y8EjMA~AjKlip5O#Ur45?<5)`QzFEai0sMcn4Z=O@>tV zM4Mfd*zuc)*VW|w`UnSS11`f_^B+w3YI zCQRIvkcL0_H<&WJp93XC6?8%Y5&>&?4F9+Z>Czw%{B>}~m%Mwk5y|k@V_%xMmnvjD zkI~(IWWuW4E2heXE`Re2*9G(K?;{3|?|_3jiptHIo%LFw&-lU|pck`bXp z+dBH!3yY^`CMV_PW9x`YhW6`Lx;L0K`Rr`hl^VYqE6W#Xndh+ig69bLl6;2<=U0ET+3ZKCB9DD_5=~&Iwh@094Ztj6~*GB_g~g<(Vf-^Jk>s 
zqXcO0q=1U}s*=p51omhw9n0OOrvCHCwSFA0Cn?nBKI&YZf22FVdCo0FE0#~^7 ztvsGgK>$UguCjWkwhmS-IE#h!Xn5Fl8wTqG1IG?*ivKvo#evrA-fxR+m@sK3e#NZl z&NO$|;3}90zjOlW8V@tdE}sZst`+c z$hb(x%5D0f*8z9Bt_vF}o(iinqfwb~P;^@WQBcDOyrjdM)K&&ZA&(4?`#pHDGI^N9 z#yIDtfxn4C^^(l+FsuD5j<>b$J;Vk1?li*YMaYxJDu^RbdJo8prEi^|;3fq9>=Zh- zF_p0r$hLzE`{v4JPM4(3iUvIn&7!7dh6a{W z+cR9?HCHV7ab=F3cK(1)5iJQp#Y!2LD>jY(LxxMn5k*m4N7#|*(?#*Td)jw;$9ZRO zR|mgF{b&7VxXW1>RO}o!BJP54qU{PST|AE^@87k#k=(+<&SM&So|M1=Vp<#03t=jq7ICQ znscs=%Pp3IqDk4Maj>A*9wgm=q$a#JyAaY1;0x? zaW?Tc8Y=TdtOfF-O3p7nr&YhjIx@|9Z=qkeK$!0VBC(Wjnk!pLM7Wo!{zq zP@0trw=UFL1`LrW!03$Y^yd=U2qq<)T*kzOusukHHh^@QJzW@!!Ax&^z7o1r{;z~c z$aJ0#Te0ZGm}RcvM&#Y2_G1*jxYLRSvG?HjS_jBVa@y3FRwQ2K-(XU$_M12Jv(?9Z z4^MhxjYUy8imMpSgaDH@C{b>CHH~HHmU}*YGP<`RnK~9hoSKZb;1XTIk z%NCW@#R{xsF9pfI@nAcQk7=uT;)g54eB1?#pg8eYrFQR;uMvyS9;BvLB5 zc|<1nd-I6^g(VmF7)}pQXi=PL3$3NYb5vGDZszRm!sFs%mM&%-F)o z;2re~NS-_EJil|QTrxBrgHU?T?!ctF7dE&&(n^%Ali9xfb=)ucK~s3oLL|J$BT0M{ z_HK`a?t!t@aTmbxMF=0TZ;91=__>AjmH$q(9}9V-K!|lc1Dx?nA{Bf@7ZzucxvB0<;cMkPlHM4XcQr) zHj`unE0O_ca9XXZGY3ACfY^e1U;GFJxm_r!EUcmdqhoN))Wpmx#3Bjb;_&jrhQ^+z zJL^p0VEIRzF`~K2VH%Pv<&GP}kXZ9*{IDvY_2pY)a`^p7T)Q=ccAfS|KhiDh%F$xC zXu@Ih3c&sXV_)z~7jYC1EVp|-%BpEuV%A(XzHstUWP2P_J&X~7D%-7qim3FTW@Hnl;|EmnjBNPPWzZNjTunzxch6#CDfPBL>Wba|m2l`V|(IiR# zk%i*Q%;fb~K8qd#fmy84ulAYPcF~YrJkJh`!Xd7Br@bZ(GND>AcQq-htM$*ii1-S1 zypz4}fO~rhjypJ5mZq6;Fxz!tt%A}NfW8JPiANtrDn;$$$u-(Xvy~L3`h6E&bq44y;Mqo0JW+epK#6yKdya3Q!N1_R2jm&G&=V+|x>!zr%Q1lCqcXG=Oev5cAOa>>Q zIB2|^Av#)^;^+3Vrd2-^6sLMH`LecGI;xCPm!jA|m~5?QH19lC>rg$oUEg;g<|tAE zW6MF(bnkM{TqHCkOlRb6>ktRJuhOM9rlosxWdXYVr2eB3dEYU(p;df|i3KTRo`5;~ zS@I12XwR9%4SE~<^fvLN@tC6=gMhwye1Q`O4J%l_@MFC7RKc)BBG&CfZe4&Fgmm<~ zy854ncKo7ddh%5v&Y#dXm7{QYI2eAV+WpzjuN9PQc9&?)bpd!}vN~h1k_QJ?3dm)s zk*A>!hv>$KoT>;BHv6gm?PH1@iQ}B`Vp}Vgg9H@*QuB-teBzRDoQ>yLD^HpK+HTor zy8QBa#KFq+zsQ*X1DauEVq*Q@K+XR}&2$>OnC&33Vr{wQuD9B1 z<7&%S$J%mVthe5{*!<6R)_azHmwmU(c)RXzraFUVy{g0d`rAY)L4y0Q_EInQlm><- zdb%5cRZB6^ zR&RDSRjn4lN;%V?95B;u)NJ*2brwJtoEzNTTA3Q4udA!8zAFm5s40u!h+o#~^b|my 
z+WB8dOAe=ijHITxm?D$_ZAm#`JQH{(H})1_@{Y`PwJhM{YU!x}x--_lkRay%TYkX) zWk3tFA9`83=sz?77j}2IAMx~%rXXGcK_OicB}qkG0>aT^Ixq&XOu*xkKTGh=&imgX zHg?z6zcrUdh5lzP(|^wN zP^V_AcCUXI?0;i%{&vgLfEH&j{;}WY(f;`3B=mJOHDsc1eW35|nm`tp24=9$Amtsu zMvsEn)QX=N3cDXb>{O;)nSwU+H zR1HP*j||&qdBI(|=Vq0_hhkVfohX2@Nb1l<@DYURNQrPWPy)!<2?m!31My~XVJ9>r zZ$&xEkF26kF^BEFivPp&fljuW$}Ul`fkhU4a=zr%=633mX;!T3r<@>Vl$m@{tqh2^ z-ZTn|PT)AMN*b>B7qz{&+Y5Eaw3!7P4J5n;yv*Az*qu4SR=-6OItizHLvdKibuo}3};?WNA2}6<%utWe{7TOLchJU?Zu2!FHaI&N=Y3^GwgFV5WSZ8qBJOGm7&SH8Et3Mv}T1Etcd_6FX5&<$S* z7FWo_0E7FYhmH(cfp+s7Qew|det1!SVaZsE!gZ^ojdF`K)7=pqHkHtW_^|~d z&NRBDIJit>T|W$7zuW&I`KXpo!`(5>LGMAIBG_3;Tq&%B$J^E4KHVOm`Pfr81UA3rJzh)CT)#w^tx~dk+~_yxlf7BqHG}cC8T@xY5BYjO2_gvnjCuss zuF_qtNc*_Md-@ooHrg&xctK&I*^uMY7N6=WK)YEFPI`C8uZ^PLzJ>EQ0tIf(4CUp88xL zn0zqW6%?jKKyC?_kbsm8LPAQ8?8z*WZ+LKbLPn?KXH*?$rdB6VIyLAl_zi&4e~i#m z^ID3bJIyBKn$U}&fQaSX_XefFtn0gXr!8$=%(3-0DbPJYzRY1O;(>p1OHCbD#YsSerAQ({PyrbXQFllRc@8ZD1p79X?8p~X)&1L$ z8~A3#QuD2MyNG>6QP+x{!&*xe8;|p;XW(cu{lu3p{?3ykR61cIMG9b|prOf%jgNVL ziM$T-K+$%es%Wjqih@jd856$dn}+7xJlXm@-v>Gs?~#Qbn_FUR0SBcA0ph;(WYYJa z-6Eh^Fpr~v{djvBn?-$-u)-GiYg=9rcBXROJ2iTE^4>$@BU@Pmhh$IeXl?y_Ds8`# ze&NDH)L!=mo%khxR!Wn=UaOyL7zQE9)%)K;1ap&&N3=?@t;a)!U?uHqiTM!jVGU?? 
zJOMB%W&XX#qJl)6Lo)0g9%I^46^ePUz`|4Phk96+Vn(0J-$hvhJR<#$4~IJlSalSz0x@8c$GW_6hw*&y<*?nZKESsJ0`84k)H<9{Eo zWveoW;1+kxtyZcdjGf)>6*4)8jK>-`3Cb-$E1Pio2&xaxaok;Zsx3l9{rzA6PqY@h z_@ZLG1QuTNK*PHx%T0Q2-LxeoT)(kuuW;mqW?{#(V{H6+=09twi3{N)WQTv9Q#bPP zk_p4m%WfsHXx`Dq1RspK&Y_M`rNOs2EUu?v?bWNhuRh7;3XeP~bwhQcQIx1B`pPFLQl`H@4(WqsvRV)><}=W)Exdbp)>k zcOyP8w7xs#&P~bA;8`?X1FA91Gq9dYH@lBs%2J!r^tjsYfLAAI)basvcoBOJuB;z$ zWh!DP3QCZ2EVBvhd{NyA&5dvT(J%=aM9ClMCTx%Sqo6?;NKLi%K;iZpY=sGO^G<~SP!$|}G z4$B8^LBr*)8q2#aU4Lbi@Nc9`TO+(ftZq{)3wLU8H-ZLK%u#Q`p{CR@A;;1r;+;p) zmviU}YVZRu-$TE2Calzz{O7N8Vz!2uq^W>+V68N}~5D z0}ZD^kY7NT+3VXDR7~1zJ1qC&8=+=owJSV4ECjwl$I}Bp*>ff3?q(|B<; zXN=sFN*jy+A68r4_2Xyoz7z0wND|H`I&X$xy6({S!Dx3u1ZX{ z5G`fzj$Ie_;!~@K^m_ssxxA@w^x=0{<2O`8cu_O=CF_Y|ll88tx-r|pLhHzp^zQ{- zrD?M~cPw*ry#IvXFdLv+J%UN}h5XH7G)?9E4d$(Rm{pnkxG_y{-cy97jL$EIGw0D# ziVmuWHM`q^X3M87xR5KVHm}kc4*MdkvFiD30te69>`E8}-I$M1`fPxemPh5SmVVAa zD^Lr3f7dyubHDfX@2=EE*3i+SL~Q$?m_8Fq=BN>i;8K7?wcZv@hFup3*hDF z3y&B=lm_c_%g6lNA!EBZk#p=_qL`UB8ceGCfk+#wOO3bBWK^R1`0m%{UW}*#8j@*_T6vSn z3UK0ok3HSi(Xf_q-PbdRj0;h)u?Q<4+_>PES|d(%R;%h?^ybLw-Ik*W-(6@Mn-++w zRZ~eqG>e+4IZP`G8}nby!c9S4KKvS(bw5VoO{fosO)>dUqgJ8~tSOpN&vwDfQ zAMAeHwY&_FXxj8AKeu`lUt!v>Hzcv(b`W~X`B8Xb_c%^c>+iR%ikGExB*)+Bb~I}t z1*4&r3b_NtsD^ijYc}&a?$##ynjN2Ir8KJD_~lX?m9(rGIxf_&HuA4H%# zt(+1#?MG22TdYu+wx?L50EtZ~rEU*JvYSgZX$jTLv%>00vXIaO1|E7G`IWMhL+XLe zeFEj=OVUIpvYJ@W6UE@h2(54L^_jeRBELv||{=C4efcEdVngHEzJDArXd zha4LBTbb0Ru5uWuSbi_VVYqxSgC;fO^*E_q9<}(+U#*nU#{~Ps5Q8Ysegy`-Gep(@ z{Cf19-|JLM$_mfeol}6VPbr$0Ko{XvwkfRXIkZwvh7Vjy$U)AwJNb&`2sa1pr-b7R zJY73muhRYX<56@f^wiP^=38 zhUVT}pKC|EoGWJ}d<6Awp7r_iH$@~0p?LVwlu4Dul$2ydA%5+>_(6r9C_vL<`70?~uRu4Vdc|Uz{ zAX%C@{MC22`Lja)Zlv}zayoh~#NwIQHeP-;4S`fjs&r=R!8y<=c48TMbVOgW8=e;v z1IUIzhXh^fl9Vv-q8gUlu25cdIR>7f{$QIGx#;{nyFpEv6pm3omf5O}PUr6(RKs_! 
zB8=pZF?!YC+G^_@f;1iH4-YAC@eS2TohSSn8+J@KxYjF7)C7gU9tv$gT0dF(hVV3W zn1#u&1)v5yXr=qWnFQ|h`buE9j3wyOb@?MKqg=(+QYdXoS{q}|EA`Q0l|%Du8xG9E zPGvJl<*qoK+)ajrbd$~nmO&Pi{%w}Yh!FFk2(-R7vPsw}KXp58#D>9=R(Y@K2MmJ~ zXnkHq0)-g?5{c^TL|7huQiy}8PP~8{O7zer($Sv!r4Q1qH6`saloeI_#YWtVeR>L3 z*v^`_Z=bT54`bPR^%sR_oY`$hSN0YlowJ$~#1)bn-uWB*q45h~mH1XdV?)^L{zMX3 zrdQYqit&4JZpMH;^dKi}+@(VJToWEe&tLCyf6RAXf-NKbHd#w_5)@rUb;8!fpz~}s zBN)>&ktgaZtt9s8T7ji=$n3qpFVktyKslLVAJ=X~IaGzJTkS*>U{cb^fq<17%=Rpd zh0Y5WI&-l%I{OUCa*#x%#57@~a}R1KTU+7p~z}m>@U#q8Nn94@z*f zhegjV)osi2&@5HUe;KIVasOTh*nVvd$_>z+3=t4CG=5!s0#1e-g;4o%J zwe*8A?4|q+WB1p!X{V(aVQ5O(p4;C5L z79E}Yf>9sCl#QeCiQKS1Y?;0_6AC(!*Ap5(xG-tWlLs^}bLvXbo88&!>Pc z=l4U-%*IllPBiEpCE2))b8BZPoEi^VTg8*;lX2Hro2W5-V)l^l3V~8Eg}2@(WJGvSb;7UyAvs!|4!M?_)5NDNL@#5`7|s}c z>3PW}7^g9eWie%vAm_cv!Vpt119a5Ka&=N!O6;xO-1K_@M^z*lE^P%->~q<1xy6lm zwcuG8^YXdMQT?fsX7Xyh4|d65U8V@QXNb29mKOj1SD_BP2{Oar3$}fDUd?&PpKBX2 zE`m~HcyILL5W`?5jV-2|j77j{3iHM(1b>sX6u47$vCx!0!CFdC#UTLDpdBMQoQM&8 z(das-+`DzU%i{{ui5pd!qCpQN{;Ebj==X-n6Gx7IR4&VjD`j`_6GvA3ZXEa-sdd)t z!8}hU!S4{4NxF>4)iDFRX>?5L!-hU0t^+@%hIe8I__g8n47g)&>oXq#kuCbY_oh%} zxM-_c6d6ZK!fAutI>@}Sxh9W!kK%O;-n}QhJXLOc%E?Zgo88!Ml&w7at+SHC`V?wZ z7+W20q^gY>X>#J1j3i#D{Y)L<)~|>XK;W1ELOZm)>RWb{qkZ=o$<8uY&Bo``H%PB* zd^T=Lqx5kg5<}EqXIAOWOijr=JgY-FYGJ!rw(M%jmM2HuT*5tIsQD~6~Ullph>j4z`6Ud?(u@) z2os`GN3&10&Z{YN`T*blH8D&PtA$98ORGi-{~1eBv9Qnh07_Dqf%1j;cX7vmSCB^w zY<97vY~zF=GD3v@?7#z5hGMC$36)Q4OSCZphZdsbGGDE3nK)c5yGGEmiwaN4Jf_7Z z>BJ?XO|gGdrm%JsJMWhoV`}Q;#pM?vWDlKHWOocO9cb|sr)pyiSi;yXf%Sz&{JCXL zXbl1IY!KiaO-Q~En48}ULc*N1yw>5?n^jc`1yYg;d55Pql|i8OmmFC;ION5IP=5qY zHdHh3@AWa`3{Ef6SOzuu5F&eya@5L0Ta-zJezU zBy$vE)6aK%O4O6&l2`ntsaP3ypOk^M6_I5qQ_FhXga-Hmon6nSl=fWGvh%ZW<>s7b zu>Z0j%pb)~2H%8Nv`rwz6JZ*ie$yixMF z>_q~ObXQ*nr8MBPTZaLUazfIP-0S%54;Q{p4P7WCyq=-rIrDc9`>ce%bT%0MijB2=z%V|x!KYNp#b^zMBZ+8yyuPN%TR$dV2g)Slt4Rde8KSz3p!NIeP5hi~;7L;{zg>%)oieg?@5cY=xtd zC;ayfI2$Fkan=Q`hkaDDOmCWBpdg{K=ben7E*OxYRD-bxJ%hn$;-|+~IHX+_)zdpT 
zD5o_K^wWu&L-Ct)S3tJ(%JAzXZoKrQm2ZI(Y2R0~J;45BI7aPpHV4LT{Swt+?SJFT z<~Ozk>T&MS%E@gw2E;SmRP&B$?FgAucku3ZPhon+rh;1oT*n2C2*bS>onGgvKB@R~ zc2aw^;D2wlFr08JP{SOh7wh61ym#`KGBUd(a1Jhq?r2 zY$U`88eBO5RQ*QJ8l_+eK^L>&F|%-bcGRZG*^Sq-A>>d*;7$mv_|F^C-ua-MRGE+o zDhAg*{MfV+(sf{a=W9xryz)BL@HW;=s9A(%eCNq))9)@U#zV++>GKHz+h`@4d+Pta zkC_Fww=`eiZ{uMI?^dh|Cl;}_^F>i|*phBhnmDI1PLL2$386!Z()<% z5VDvh1}F6=%|AAF`L#jHMXM;6X(GzWBgJ;+!IvHfkA90CP|>AzU(FWd)f+!5KVyfH z_N=>8y691Hivan;>H6a`7xxXAoU^g0thfyPg2W0*Ygd=WtyPy+Y7)_J2i zCU(w)VYF}yPw6Lmab#R&Gl8m2DS`9mA-7HXffc%y56<{P2R==r@Vmg{Wv%5QxovXx zLw_18om>kOz6ius?hFVz`qHGr_-(^7L3@$-b@>=tU#sXYPx_)-Kl+jb(T;o3J+J2bdtDHi_Sa`a^fQ)l&YaA5mOB_7V#1Tr$rYU8l4w}bZ;KTlkWVr=!xA- zm2joaJYL*YG#<+JKQw>iNtOw^|g?Cl51AkhlR zuu&pke#e#$S$9?26GX@b%kgNsY!(}F)Giw32x6odOvVlsF^z({?wU!JZX}HOG7hfq zD_W+kd|jCSc=%uv(^Rtu4h<*h$O&HMr=w5FVeEz-BfX%Qghj z63A}bgp4`ESlUi#);I+GG^s$N&1K>86NajLW~@%pu@KIN@4Y&8D{cpiO>d%&>~VMEm}cr*>i4YLMVD^+}kJdlbEUg`&YmQ}V&jqDg}a z^0{1;Qe>Aw^yUJ*-jlGSENt~Q&lx5|C0FF>;E3JoMB8*g75zjU$;=ZZ*vgAQ+<^M3 z>v(wel!CU^#RK%!Fu=;$%_-6o?`5|k^ihRsTB-@1*Az*rfHgU0w(8dj8R8U#|NR)Y zd|dz&eNSlrB-DL6$NQ`)`2-zbf*T2E+~`QxS7N8k zlHS842X_7Nub%pe+qEi!SN9VS9*e;AB#-RMJ+WnO6M|;rEylCJMyEh*m!pw*{9!g6 z7EB?pOlW^d0c> zf-i}qB1h8X$6bBp)!jKEfyU&CQG-Cc`(%5JCmE;Nc@i~22MKwP^<7j7XF~g|AYeu1 z=hDQ9N3J*u^TUizzf0MKH1k}gQ1bBeuUP&I7IH<^x)&eZ#XYsxN+aex!LuyR=~w3P zaT_b<>a0wy!ddKL=6D%kSr#AA+FqrRQL&KfvWzJeKRxq%1&VEKGTO9|sDbK9M!XZ{ zxKt&%5`^~z2_dM`MpaZ&;qhHMam^7REooN?_l=)+`S+j&cQ4X`TZ>o$Mt(B5-n9_;3IWnP2LdtuXb;_u zv0in!R)#vPbP7icwgk{Ms)Qb;sl?@D^}wEAG2glTFl$?2?pIa8vQ%Lx2IX7K77bxN zg#IJxV_}_gf%v-j42xauJ9`WWqI9SX_aH@0YJ$iUPm{6`KW)zx{c~dXsN?Q^#B*3X zTp7=}`BzJAr@l}59@2=fRYifLc+eipy2Nt!WSoNlGu&COegUloyev)Vt^1+2Vgbkz z8_h^!osZ+$D0dvy+d2zcBV2|{2M8=7{0sRuo8Y%0kpOzSZXR1N50MmXyV@Rv0}e8C9KbE4neq+o zR-UnF;e*c;S(BG{S_x@4_Ii8@5okwYhvI0X^3-*$tU$h^IpU-?V5cPsdiA=g*m#@0buI#4mO08+IEfM%L0 z5!!E0Se!#?E_*kr&k%6Ec?ktMR?K$fYjy<~191N+*iy!>RdEC2=wM4z^6+6K%RqWd zkUH&PH@Tq=j~B6Afo-MKD%}F~Q9{tvaR}i-k(PVJ(>umGc 
zm|5u1#7?05ndnU`A9=6c1w_TJI#!OPtD-89IfiLk(-sB60XI%gTMD>%ram{GoAr+$ zpMcSQQs_$oj8*)EQW5?opewC_se6_AL%~AH!cj+6>CL{!6qFXtv()(yyTDHjt-tHR zPcskW9syTrvNeo#H?&qwlxT4Nq)VKYHM9b6HVGShCEw8*<7=6-78S1UoSuq zyAtGS4rudo(4){2XB&SH5+C{IhP!e|9fYAuKGx_&VGal3@5sVF(n~2~xq0}WMEq7V z#VG!nHd`U`-`i_X6onQ@+r?FMQsl7*3kQ?Fc2`kx8qI zSLBN`e%bq1Yynu1^02hJU;^ih;6#_InG!eKf4APhWi@+|ZYx#N8s^Gof9ZuOk0GYP zyK&IQn#=`|o#Zq_m-my@zKJoSntOe_F=xK}No(D`?LaIk0`brJNBr|Bk?jrRMG6Cw z=I9tyP8O-n?QDSTp>HmkKWHoJ_;B}hp)Q!vwNwW`jkeg8-X&YH@>$nJa%t-WZnf>w49FDCA;5qBe$yq^88ExPdKM~bJ(G^;IT24zS zsQe5%3SVl)RQIupjA)`os_VU!eam08%6`ybmaIBm7@5J_I!B z5KR*q*~QQx6Ui5Vti!050uIUWA@`n56bJKFS+1x@GYp=z9UI}t9&aJ6VR5?VUGu(} zg~<9CASQrCtq%DUE@1qcfD*V-O6f6{5k1lU)b-iIb{n_kmRT*D(#Ly6-dnrZxq(cx zUvDSJ*!NYq6X20Jx_~~o6JIqEIz$9J3aP`#J$ckOm3S!(vX`w zcI(yELfh4R%w9N)mcJqHM@icZlO-o@J)mLKx;6J@uobclBS%6$tTK$3V6 z8;ba|BsR~n{FD(vB@Gi8+_AB|#r8QPCzGy(wz*2onyL4fOOPb~+gUkKB4n%t_~$38 zn-PTJa%wp-y0i}8a%AX%=Yfl~$eppZ@D7y9~HI+kqQ)+2fZwN9uo$5Pve49mIjdb?u7cZ8H`%mB1J=PfPnLh*-k%py`H?3o zQZ4F~1Xug@4HPnZu`b9pexX>cm;^b_B-38DiW8X+eTc_EB|M%t@~XU9klbEe0O@h^yQOK*Tu+&*#C~^}e8ed?_Bp>@ z+tKVOP7Oku`Be$V zqjkBUOsxF0!cRmoJI;aa?Gg(VQv%D3RgF(Eeywm$sywL3hjJ<&o#Lyo#pZ^wL9P7U zab@PN@+KT5)_T>5oTy9~kY_OUhFwg_lDv?QVXOMumUh9?xYHhJLr*CxfCwT<_29A5 zfksY<+?6~jFwSPKEjj(>Nsxew?v^piMf)nVE}NRu*DO};LvyFg5+WCBBS`Z4+-k2) z+cag+i71wG?Z551)b$0NBDInFC~SFuQd{##o~!ga;vKuw;t{Y>dtK<(fp80JM-q@1 zsY@0(M`7fDDp|5?aOVuDnC+~`?Y}d$&T^O>Zf$Zltk8O1*VuTUp6 zQv16xdu+`ky0}}aLCM+v;SE!}>puA;j%BDaW4u|pqTkNwOg1zOIaRbnh=ykBaiNH+ z94!?{m)}_ze_pBy!B6{GlK!mHloEo{fg2VoCgq>A!{GK2-gRsI>tN`Km`H!=n(~0! 
zbZ^in1C>Kf?y8mIfw`0=%lZXt1Qt6NxHUrNC=~WS@H=~ak}{y9EBG8P6ZeGG+i7I#-%{eCI&|43HTLujegoH2NhRi{@xlk|;oxc^(@2qww zkk!vdjbE-V4t2EW#?*Hmzl;l9z}AxIZlIIIPwO-^+y$ooA+QN3Lhc~9n5u&xlv$uB z*S$r%Bxs-3Kd%sZV<-?(KT8+STYr#xl~w`=aj)7J`Cq)9Z$xIh-s&Tk5;t+LK9^8b zU|e)(uNTA8v!TMyPD&=GeyVblg85O7;zE_IinChB$EHa*7UdrKt{bvg0fEDSb#)?n zcJAASTG!Uy)X^(7v3#^3Os@cWjMAQJVaSPK9F#z1W${i>E>emCwr{`x!JWA?A8xK%wW2Dc zVns$iS!+kc-uq`;wW*k0yWe{Yf+jx2JLEpnA?Dd1*i_SYu`df8=sL{5qKT-_0*SVk?f@6I z|Ji=4bs=pws#k}0cVmAd+2}dDN8n55-EnFxm~D0iAK$8LRBF5nIR(L#_Vp?$pW52t z=_wDH8PC=fOpRsE+>s+rk`e~qn8`DTJ}m1Cwry4PV-hTNE+EPHxWw)U=5M6;RsRmm ze3$TEFz3C*MXs>CSuOcO>T6yGAKGY8pN{ssE=YFhwgFv6F?Uhn@F6k=3@1NnMdF1k zNgYhv94jKI>?u?q*Feps0aNJjX?qAX%^>Cu4#jAx=k55~%BCHZX02;AB&KKH6ZZ^j zZ9>31%WT5RKmI~;JFY-1Ww+=y>F$49yJp^;Q#@265j9^S`Be8oqr0%yyo-!>xT&^O zKb4>=VowYu%JJ4ed z%g&w9bu17`CcbIF@D(fy=`I)-_9s@VN6pb9R_bdofJZ3kT?&LSp+oZcA8vbVyW;;0 zQ(>h4_b`=zAzc5BUnFN}spRy-X_qBnpr`)_;o{)vM8M9$_8&1S1WZgnko12Y{|R{c zuN?0GmfCewOUD+Q4b6MGW=9JdW%uUY2oXg*tDS;%6{~L6^!!aAo~*gK-dNmW{i0^u zk-gB(SfUwA&eb2(5H?iE0q(l1^I03NFcC@eq$s!sdo|b`gJ2Rjz3Okbl=eyIM93x+d-&r(tv_pFfAxI%Z=tjZfIEbP0(LAk+ zUiFq1az6_TBRTjM(fWoL*}bHReyk3x*3mc`Rn_WYMpnI1v=7?q)->=`-IfPRVI@vI zF-T?@HtH{s%#C0ny)`$RD0Jv)o_|ldLYqgoj0opRz`xFmnW2ogFLeOvL@3}? z2poV}!6fE?^wrQJ7A1iLgVh`NdeN|!$4xb%i~ayb1NSdMHDIfUMUyn|lZT@LTlDY! za*kv$_=xLbF~QVVTr%Xo%(I2=@T17j4W?D-|EMzA9z;S<{ksL1!-ua-mRoJ5(3K`@ zHX-2bb#I_?wH;N`16Q8f@aoRPM^o`o>7nDThnr=&QQ9EzSdB+BWTDKs6Fkg`$)G_E zGKRX-`OufQ2z~p}RvIJnxOrhrTCTP?ws$lDq%+b`CR|vumuU<3vW$;pWc+|2s;D@N zRh29-6q+COm2Xax7;tz>&9q!Riv+!AKwig3j)zQzM>hr{wY()!VVBzwwPYPSw|AmP zzzLR4QzME+jqm1wY2+hLAaM4T4?@PI)*jHhYvS~PboF9DUhGT0B`IQCQ}irsY65j? 
zK-|nnyt=n&2M;HXLS&k-HmAu9GcsAjqltAU~x%z0OgF7LWn2r-bSQNskHa#>5@x3uY z6+=L(9t$e4FTGnN)6*or9YZ%Y{lXx=Y1Z##hzG zj>zJy7b|D=FLnjoE>4t1iO&(mtg7=pb2kO}a^Rh^k_pf(MPpYVEA$9u?Jum5>yU}`iN!oJ5 z=lcA`Dq)Qty9gT3@Pk$67-Cu>;6jXna@Mcx;0DgmJv?~7gt|}g!8Qt9wS`2%Y|1xG)gwG7tMmwICMDk1TjGPW}o^S6!MKvD%qW4=uigL4qbcc1)+ z6u6-3GO%}QH*nqE{O=5S-&>w|L<_?(0MgaveQcUZ*>%H$1fXsm*ZI=NebJg!S)VKF*Q*Y7t#N{XB|iU-r0