xzcat.c 81 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136
  1. /*
  2. * Simple XZ decoder command line tool
  3. *
  4. * Author: Lasse Collin <lasse.collin@tukaani.org>
  5. *
  6. * This file has been put into the public domain.
  7. * You can do whatever you want with this file.
  8. * Modified for toybox by Isaac Dunham
  9. USE_XZCAT(NEWTOY(xzcat, NULL, TOYFLAG_USR|TOYFLAG_BIN))
  10. config XZCAT
  11. bool "xzcat"
  12. default n
  13. help
  14. usage: xzcat [filename...]
  15. Decompress listed files to stdout. Use stdin if no files listed.
  16. */
  17. #define FOR_xzcat
  18. #include "toys.h"
  19. // BEGIN xz.h
  20. /**
  21. * enum xz_ret - Return codes
  22. * @XZ_OK: Everything is OK so far. More input or more
  23. * output space is required to continue.
  24. * @XZ_STREAM_END: Operation finished successfully.
  25. * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding
  26. * is still possible in multi-call mode by simply
  27. * calling xz_dec_run() again.
  28. * Note that this return value is used only if
  29. * XZ_DEC_ANY_CHECK was defined at build time,
  30. * which is not used in the kernel. Unsupported
  31. * check types return XZ_OPTIONS_ERROR if
  32. * XZ_DEC_ANY_CHECK was not defined at build time.
  33. * @XZ_MEM_ERROR: Allocating memory failed. The amount of memory
  34. * that was tried to be allocated was no more than the
  35. * dict_max argument given to xz_dec_init().
  36. * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than
  37. * allowed by the dict_max argument given to
  38. * xz_dec_init().
  39. * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic
  40. * bytes).
  41. * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested
  42. * compression options. In the decoder this means
  43. * that the header CRC32 matches, but the header
  44. * itself specifies something that we don't support.
  45. * @XZ_DATA_ERROR: Compressed data is corrupt.
  46. * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly
  47. * different between multi-call and single-call
  48. * mode; more information below.
  49. *
  50. * XZ_BUF_ERROR is returned when two consecutive calls to XZ code cannot
  51. * consume any input and cannot produce any new output. This happens when
  52. * there is no new input available, or the output buffer is full while at
  53. * least one output byte is still pending. Assuming your code is not buggy,
  54. * you can get this error only when decoding a compressed stream that is
  55. * truncated or otherwise corrupt.
  56. */
/* Return codes from the XZ decoder (full semantics in the comment above). */
enum xz_ret {
  XZ_OK,                /* progressing; needs more input or output space */
  XZ_STREAM_END,        /* operation finished successfully */
  XZ_UNSUPPORTED_CHECK, /* integrity check type not supported */
  XZ_MEM_ERROR,         /* memory allocation failed */
  XZ_MEMLIMIT_ERROR,    /* dictionary would exceed dict_max */
  XZ_FORMAT_ERROR,      /* not a .xz file (wrong magic bytes) */
  XZ_OPTIONS_ERROR,     /* header CRC ok but options unsupported */
  XZ_DATA_ERROR,        /* compressed data is corrupt */
  XZ_BUF_ERROR          /* no progress possible (truncated/corrupt stream) */
};
  68. /**
  69. * struct xz_buf - Passing input and output buffers to XZ code
  70. * @in: Beginning of the input buffer. This may be NULL if and only
  71. * if in_pos is equal to in_size.
  72. * @in_pos: Current position in the input buffer. This must not exceed
  73. * in_size.
  74. * @in_size: Size of the input buffer
  75. * @out: Beginning of the output buffer. This may be NULL if and only
  76. * if out_pos is equal to out_size.
  77. * @out_pos: Current position in the output buffer. This must not exceed
  78. * out_size.
  79. * @out_size: Size of the output buffer
  80. *
  81. * Only the contents of the output buffer from out[out_pos] onward, and
  82. * the variables in_pos and out_pos are modified by the XZ code.
  83. */
/* Input/output buffer pair handed to xz_dec_run(); the decoder only
 * advances in_pos/out_pos and writes out[out_pos..] onward. */
struct xz_buf {
  const uint8_t *in;  /* input buffer; may be NULL iff in_pos == in_size */
  size_t in_pos;      /* current read position, <= in_size */
  size_t in_size;     /* total bytes available in 'in' */
  uint8_t *out;       /* output buffer; may be NULL iff out_pos == out_size */
  size_t out_pos;     /* current write position, <= out_size */
  size_t out_size;    /* capacity of 'out' */
};
  92. /**
  93. * struct xz_dec - Opaque type to hold the XZ decoder state
  94. */
  95. struct xz_dec;
  96. /**
  97. * xz_dec_init() - Allocate and initialize a XZ decoder state
  98. * @mode: Operation mode
  99. * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for
  100. * multi-call decoding. LZMA2 dictionary is always 2^n bytes
  101. * or 2^n + 2^(n-1) bytes (the latter sizes are less common
  102. * in practice), so other values for dict_max don't make sense.
  103. * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB,
  104. * 512 KiB, and 1 MiB are probably the only reasonable values,
  105. * except for kernel and initramfs images where a bigger
  106. * dictionary can be fine and useful.
  107. *
  108. * dict_max specifies the maximum allowed dictionary size that xz_dec_run()
  109. * may allocate once it has parsed the dictionary size from the stream
  110. * headers. This way excessive allocations can be avoided while still
  111. * limiting the maximum memory usage to a sane value to prevent running the
  112. * system out of memory when decompressing streams from untrusted sources.
  113. *
  114. * On success, xz_dec_init() returns a pointer to struct xz_dec, which is
  115. * ready to be used with xz_dec_run(). If memory allocation fails,
  116. * xz_dec_init() returns NULL.
  117. */
  118. struct xz_dec *xz_dec_init(uint32_t dict_max);
  119. /**
  120. * xz_dec_run() - Run the XZ decoder
  121. * @s: Decoder state allocated using xz_dec_init()
  122. * @b: Input and output buffers
  123. *
  124. * The possible return values depend on build options and operation mode.
  125. * See enum xz_ret for details.
  126. *
  127. * Note that if an error occurs in single-call mode (return value is not
  128. * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the
  129. * contents of the output buffer from b->out[b->out_pos] onward are
  130. * undefined. This is true even after XZ_BUF_ERROR, because with some filter
  131. * chains, there may be a second pass over the output buffer, and this pass
  132. * cannot be properly done if the output buffer is truncated. Thus, you
  133. * cannot give the single-call decoder a too small buffer and then expect to
  134. * get that amount valid data from the beginning of the stream. You must use
  135. * the multi-call decoder if you don't want to uncompress the whole stream.
  136. */
  137. enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
  138. /**
  139. * xz_dec_reset() - Reset an already allocated decoder state
  140. * @s: Decoder state allocated using xz_dec_init()
  141. *
  142. * This function can be used to reset the multi-call decoder state without
  143. * freeing and reallocating memory with xz_dec_end() and xz_dec_init().
  144. *
  145. * In single-call mode, xz_dec_reset() is always called in the beginning of
  146. * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
  147. * multi-call mode.
  148. */
  149. void xz_dec_reset(struct xz_dec *s);
  150. /**
  151. * xz_dec_end() - Free the memory allocated for the decoder state
  152. * @s: Decoder state allocated using xz_dec_init(). If s is NULL,
  153. * this function does nothing.
  154. */
  155. void xz_dec_end(struct xz_dec *s);
  156. /*
  157. * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
  158. * calculation, the third argument must be zero. To continue the calculation,
  159. * the previously returned value is passed as the third argument.
  160. */
  161. static uint32_t xz_crc32_table[256];
  162. uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
  163. {
  164. crc = ~crc;
  165. while (size != 0) {
  166. crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
  167. --size;
  168. }
  169. return ~crc;
  170. }
  171. static uint64_t xz_crc64_table[256];
  172. // END xz.h
  173. static uint8_t in[BUFSIZ];
  174. static uint8_t out[BUFSIZ];
  175. void do_xzcat(int fd, char *name)
  176. {
  177. struct xz_buf b;
  178. struct xz_dec *s;
  179. enum xz_ret ret;
  180. const char *msg;
  181. crc_init(xz_crc32_table, 1);
  182. const uint64_t poly = 0xC96C5795D7870F42ULL;
  183. uint32_t i;
  184. uint32_t j;
  185. uint64_t r;
  186. /* initialize CRC64 table*/
  187. for (i = 0; i < 256; ++i) {
  188. r = i;
  189. for (j = 0; j < 8; ++j)
  190. r = (r >> 1) ^ (poly & ~((r & 1) - 1));
  191. xz_crc64_table[i] = r;
  192. }
  193. /*
  194. * Support up to 64 MiB dictionary. The actually needed memory
  195. * is allocated once the headers have been parsed.
  196. */
  197. s = xz_dec_init(1 << 26);
  198. if (s == NULL) {
  199. msg = "Memory allocation failed\n";
  200. goto error;
  201. }
  202. b.in = in;
  203. b.in_pos = 0;
  204. b.in_size = 0;
  205. b.out = out;
  206. b.out_pos = 0;
  207. b.out_size = BUFSIZ;
  208. for (;;) {
  209. if (b.in_pos == b.in_size) {
  210. b.in_size = read(fd, in, sizeof(in));
  211. b.in_pos = 0;
  212. }
  213. ret = xz_dec_run(s, &b);
  214. if (b.out_pos == sizeof(out)) {
  215. if (fwrite(out, 1, b.out_pos, stdout) != b.out_pos) {
  216. msg = "Write error\n";
  217. goto error;
  218. }
  219. b.out_pos = 0;
  220. }
  221. if (ret == XZ_OK)
  222. continue;
  223. if (ret == XZ_UNSUPPORTED_CHECK)
  224. continue;
  225. if (fwrite(out, 1, b.out_pos, stdout) != b.out_pos) {
  226. msg = "Write error\n";
  227. goto error;
  228. }
  229. switch (ret) {
  230. case XZ_STREAM_END:
  231. xz_dec_end(s);
  232. return;
  233. case XZ_MEM_ERROR:
  234. msg = "Memory allocation failed\n";
  235. goto error;
  236. case XZ_MEMLIMIT_ERROR:
  237. msg = "Memory usage limit reached\n";
  238. goto error;
  239. case XZ_FORMAT_ERROR:
  240. msg = "Not a .xz file\n";
  241. goto error;
  242. case XZ_OPTIONS_ERROR:
  243. msg = "Unsupported options in the .xz headers\n";
  244. goto error;
  245. case XZ_DATA_ERROR:
  246. case XZ_BUF_ERROR:
  247. msg = "File is corrupt\n";
  248. goto error;
  249. default:
  250. msg = "Bug!\n";
  251. goto error;
  252. }
  253. }
  254. error:
  255. xz_dec_end(s);
  256. error_exit("%s", msg);
  257. }
// Entry point: run do_xzcat() on each argument, or on stdin when none given.
void xzcat_main(void)
{
  loopfiles(toys.optargs, do_xzcat);
}
  262. // BEGIN xz_private.h
  263. /* Uncomment as needed to enable BCJ filter decoders.
  264. * These cost about 2.5 k when all are enabled; SPARC and IA64 make 0.7 k
  265. * */
  266. #define XZ_DEC_X86
  267. #define XZ_DEC_POWERPC
  268. #define XZ_DEC_IA64
  269. #define XZ_DEC_ARM
  270. #define XZ_DEC_ARMTHUMB
  271. #define XZ_DEC_SPARC
  272. #define memeq(a, b, size) (memcmp(a, b, size) == 0)
  273. /* Inline functions to access unaligned unsigned 32-bit integers */
  274. #ifndef get_unaligned_le32
  275. static inline uint32_t get_unaligned_le32(const uint8_t *buf)
  276. {
  277. return (uint32_t)buf[0]
  278. | ((uint32_t)buf[1] << 8)
  279. | ((uint32_t)buf[2] << 16)
  280. | ((uint32_t)buf[3] << 24);
  281. }
  282. #endif
  283. #ifndef get_unaligned_be32
  284. static inline uint32_t get_unaligned_be32(const uint8_t *buf)
  285. {
  286. return (uint32_t)(buf[0] << 24)
  287. | ((uint32_t)buf[1] << 16)
  288. | ((uint32_t)buf[2] << 8)
  289. | (uint32_t)buf[3];
  290. }
  291. #endif
  292. #ifndef put_unaligned_le32
  293. static inline void put_unaligned_le32(uint32_t val, uint8_t *buf)
  294. {
  295. buf[0] = (uint8_t)val;
  296. buf[1] = (uint8_t)(val >> 8);
  297. buf[2] = (uint8_t)(val >> 16);
  298. buf[3] = (uint8_t)(val >> 24);
  299. }
  300. #endif
  301. #ifndef put_unaligned_be32
  302. static inline void put_unaligned_be32(uint32_t val, uint8_t *buf)
  303. {
  304. buf[0] = (uint8_t)(val >> 24);
  305. buf[1] = (uint8_t)(val >> 16);
  306. buf[2] = (uint8_t)(val >> 8);
  307. buf[3] = (uint8_t)val;
  308. }
  309. #endif
  310. /*
  311. * Use get_unaligned_le32() also for aligned access for simplicity. On
  312. * little endian systems, #define get_le32(ptr) (*(const uint32_t *)(ptr))
  313. * could save a few bytes in code size.
  314. */
  315. #ifndef get_le32
  316. # define get_le32 get_unaligned_le32
  317. #endif
  318. /*
  319. * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
  320. * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
  321. */
  322. #ifndef XZ_DEC_BCJ
  323. # if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
  324. || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
  325. || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
  326. || defined(XZ_DEC_SPARC)
  327. # define XZ_DEC_BCJ
  328. # endif
  329. #endif
  330. /*
  331. * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
  332. * before calling xz_dec_lzma2_run().
  333. */
  334. struct xz_dec_lzma2 *xz_dec_lzma2_create(uint32_t dict_max);
  335. /*
  336. * Decode the LZMA2 properties (one byte) and reset the decoder. Return
  337. * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
  338. * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
  339. * decoder doesn't support.
  340. */
  341. enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
  342. uint8_t props);
  343. /* Decode raw LZMA2 stream from b->in to b->out. */
  344. enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
  345. struct xz_buf *b);
  346. // END "xz_private.h"
  347. /*
  348. * Branch/Call/Jump (BCJ) filter decoders
  349. * The rest of the code is inside this ifdef. It makes things a little more
  350. * convenient when building without support for any BCJ filters.
  351. */
  352. #ifdef XZ_DEC_BCJ
/* State for the generic BCJ (Branch/Call/Jump) filter wrapper. */
struct xz_dec_bcj {
  /* Type of the BCJ filter being used */
  enum {
    BCJ_X86 = 4,      /* x86 or x86-64 */
    BCJ_POWERPC = 5,  /* Big endian only */
    BCJ_IA64 = 6,     /* Big or little endian */
    BCJ_ARM = 7,      /* Little endian only */
    BCJ_ARMTHUMB = 8, /* Little endian only */
    BCJ_SPARC = 9     /* Big or little endian */
  } type;

  /*
   * Return value of the next filter in the chain. We need to preserve
   * this information across calls, because we must not call the next
   * filter anymore once it has returned XZ_STREAM_END.
   */
  enum xz_ret ret;

  /*
   * Absolute position relative to the beginning of the uncompressed
   * data (in a single .xz Block). We care only about the lowest 32
   * bits so this doesn't need to be uint64_t even with big files.
   */
  uint32_t pos;

  /* x86 filter state */
  uint32_t x86_prev_mask;

  /* Temporary space to hold the variables from struct xz_buf */
  uint8_t *out;
  size_t out_pos;
  size_t out_size;

  struct {
    /* Amount of already filtered data in the beginning of buf */
    size_t filtered;

    /* Total amount of data currently stored in buf */
    size_t size;

    /*
     * Buffer to hold a mix of filtered and unfiltered data. This
     * needs to be big enough to hold Alignment + 2 * Look-ahead:
     *
     * Type      Alignment  Look-ahead
     * x86           1          4
     * PowerPC       4          0
     * IA-64        16          0
     * ARM           4          0
     * ARM-Thumb     2          2
     * SPARC         4          0
     */
    uint8_t buf[16];
  } temp;
};
  401. /*
  402. * Decode the Filter ID of a BCJ filter. This implementation doesn't
  403. * support custom start offsets, so no decoding of Filter Properties
  404. * is needed. Returns XZ_OK if the given Filter ID is supported.
  405. * Otherwise XZ_OPTIONS_ERROR is returned.
  406. */
  407. enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
  408. /*
  409. * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
  410. * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
  411. * must be called directly.
  412. */
  413. enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
  414. struct xz_dec_lzma2 *lzma2,
  415. struct xz_buf *b);
  416. #ifdef XZ_DEC_X86
  417. /*
  418. * This is used to test the most significant byte of a memory address
  419. * in an x86 instruction.
  420. */
  421. static inline int bcj_x86_test_msbyte(uint8_t b)
  422. {
  423. return b == 0x00 || b == 0xFF;
  424. }
/*
 * x86 BCJ filter decoder: rewrites the 32-bit operand of CALL/JMP-style
 * opcodes (0xE8/0xE9) from the encoder's absolute form back to relative
 * form. Returns how many bytes of buf were fully processed; the caller
 * keeps the unprocessed tail (up to 4 look-ahead bytes) for the next call.
 */
static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
  /* Whether a given 3-bit mask of recently seen opcode bytes still
   * allows a conversion at the current position. */
  static const int mask_to_allowed_status[8]
      = { 1,1,1,0,1,0,0,0 };
  static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
  size_t i;
  /* Position of the previous opcode byte; starts "far in the past". */
  size_t prev_pos = (size_t)-1;
  /* Mask of nearby opcode bytes, carried over from the previous call. */
  uint32_t prev_mask = s->x86_prev_mask;
  uint32_t src;
  uint32_t dest;
  uint32_t j;
  uint8_t b;
  /* Need the opcode byte plus a 4-byte operand of look-ahead. */
  if (size <= 4)
    return 0;
  size -= 4;
  for (i = 0; i < size; ++i) {
    if ((buf[i] & 0xFE) != 0xE8)  /* not 0xE8/0xE9 */
      continue;
    /* Distance from the previous opcode byte. */
    prev_pos = i - prev_pos;
    if (prev_pos > 3) {
      prev_mask = 0;
    } else {
      prev_mask = (prev_mask << (prev_pos - 1)) & 7;
      if (prev_mask != 0) {
        b = buf[i + 4 - mask_to_bit_num[prev_mask]];
        if (!mask_to_allowed_status[prev_mask]
            || bcj_x86_test_msbyte(b)) {
          /* Overlapping candidate: record and skip. */
          prev_pos = i;
          prev_mask = (prev_mask << 1) | 1;
          continue;
        }
      }
    }
    prev_pos = i;
    if (bcj_x86_test_msbyte(buf[i + 4])) {
      src = get_unaligned_le32(buf + i + 1);
      /* Iterate until the converted value no longer collides with a
       * previously converted overlapping operand. */
      for (;;) {
        dest = src - (s->pos + (uint32_t)i + 5);
        if (prev_mask == 0)
          break;
        j = mask_to_bit_num[prev_mask] * 8;
        b = (uint8_t)(dest >> (24 - j));
        if (!bcj_x86_test_msbyte(b))
          break;
        src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
      }
      /* Sign-extend bit 24 over the top byte of the result. */
      dest &= 0x01FFFFFF;
      dest |= (uint32_t)0 - (dest & 0x01000000);
      put_unaligned_le32(dest, buf + i + 1);
      i += 4;
    } else {
      prev_mask = (prev_mask << 1) | 1;
    }
  }
  /* Save the opcode-proximity state for the next call. */
  prev_pos = i - prev_pos;
  s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
  return i;
}
  483. #endif
  484. #ifdef XZ_DEC_POWERPC
  485. static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  486. {
  487. size_t i;
  488. uint32_t instr;
  489. for (i = 0; i + 4 <= size; i += 4) {
  490. instr = get_unaligned_be32(buf + i);
  491. if ((instr & 0xFC000003) == 0x48000001) {
  492. instr &= 0x03FFFFFC;
  493. instr -= s->pos + (uint32_t)i;
  494. instr &= 0x03FFFFFC;
  495. instr |= 0x48000001;
  496. put_unaligned_be32(instr, buf + i);
  497. }
  498. }
  499. return i;
  500. }
  501. #endif
  502. #ifdef XZ_DEC_IA64
/*
 * IA-64 BCJ filter decoder: walks 16-byte instruction bundles and, for
 * branch instructions selected via branch_table, converts the 21-bit
 * address field back to relative form. Returns bytes processed (a
 * multiple of 16).
 */
static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
  /* Per-template mask of which of the three slots may hold a branch. */
  static const uint8_t branch_table[32] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    4, 4, 6, 6, 0, 0, 7, 7,
    4, 4, 0, 0, 4, 4, 0, 0
  };
  /*
   * The local variables take a little bit stack space, but it's less
   * than what LZMA2 decoder takes, so it doesn't make sense to reduce
   * stack usage here without doing that for the LZMA2 decoder too.
   */
  /* Loop counters */
  size_t i;
  size_t j;
  /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
  uint32_t slot;
  /* Bitwise offset of the instruction indicated by slot */
  uint32_t bit_pos;
  /* bit_pos split into byte and bit parts */
  uint32_t byte_pos;
  uint32_t bit_res;
  /* Address part of an instruction */
  uint32_t addr;
  /* Mask used to detect which instructions to convert */
  uint32_t mask;
  /* 41-bit instruction stored somewhere in the lowest 48 bits */
  uint64_t instr;
  /* Instruction normalized with bit_res for easier manipulation */
  uint64_t norm;
  for (i = 0; i + 16 <= size; i += 16) {
    /* The low 5 bits of the first bundle byte select the template. */
    mask = branch_table[buf[i] & 0x1F];
    for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
      if (((mask >> slot) & 1) == 0)
        continue;
      byte_pos = bit_pos >> 3;
      bit_res = bit_pos & 7;
      /* Load 6 bytes (enough for the 41-bit slot plus bit_res skew). */
      instr = 0;
      for (j = 0; j < 6; ++j)
        instr |= (uint64_t)(buf[i + j + byte_pos])
            << (8 * j);
      norm = instr >> bit_res;
      /* Only convert branches of the expected opcode/subfield form. */
      if (((norm >> 37) & 0x0F) == 0x05
          && ((norm >> 9) & 0x07) == 0) {
        /* Extract the 21-bit address (20 low bits + sign bit 36). */
        addr = (norm >> 13) & 0x0FFFFF;
        addr |= ((uint32_t)(norm >> 36) & 1) << 20;
        addr <<= 4;
        addr -= s->pos + (uint32_t)i;
        addr >>= 4;
        /* Splice the converted address back into the slot. */
        norm &= ~((uint64_t)0x8FFFFF << 13);
        norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
        norm |= (uint64_t)(addr & 0x100000)
            << (36 - 20);
        instr &= (1 << bit_res) - 1;
        instr |= norm << bit_res;
        /* Write the 6 modified bytes back. */
        for (j = 0; j < 6; j++)
          buf[i + j + byte_pos]
              = (uint8_t)(instr >> (8 * j));
      }
    }
  }
  return i;
}
  567. #endif
  568. #ifdef XZ_DEC_ARM
  569. static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  570. {
  571. size_t i;
  572. uint32_t addr;
  573. for (i = 0; i + 4 <= size; i += 4) {
  574. if (buf[i + 3] == 0xEB) {
  575. addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
  576. | ((uint32_t)buf[i + 2] << 16);
  577. addr <<= 2;
  578. addr -= s->pos + (uint32_t)i + 8;
  579. addr >>= 2;
  580. buf[i] = (uint8_t)addr;
  581. buf[i + 1] = (uint8_t)(addr >> 8);
  582. buf[i + 2] = (uint8_t)(addr >> 16);
  583. }
  584. }
  585. return i;
  586. }
  587. #endif
  588. #ifdef XZ_DEC_ARMTHUMB
  589. static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  590. {
  591. size_t i;
  592. uint32_t addr;
  593. for (i = 0; i + 4 <= size; i += 2) {
  594. if ((buf[i + 1] & 0xF8) == 0xF0
  595. && (buf[i + 3] & 0xF8) == 0xF8) {
  596. addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
  597. | ((uint32_t)buf[i] << 11)
  598. | (((uint32_t)buf[i + 3] & 0x07) << 8)
  599. | (uint32_t)buf[i + 2];
  600. addr <<= 1;
  601. addr -= s->pos + (uint32_t)i + 4;
  602. addr >>= 1;
  603. buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
  604. buf[i] = (uint8_t)(addr >> 11);
  605. buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
  606. buf[i + 2] = (uint8_t)addr;
  607. i += 2;
  608. }
  609. }
  610. return i;
  611. }
  612. #endif
  613. #ifdef XZ_DEC_SPARC
  614. static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  615. {
  616. size_t i;
  617. uint32_t instr;
  618. for (i = 0; i + 4 <= size; i += 4) {
  619. instr = get_unaligned_be32(buf + i);
  620. if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
  621. instr <<= 2;
  622. instr -= s->pos + (uint32_t)i;
  623. instr >>= 2;
  624. instr = ((uint32_t)0x40000000 - (instr & 0x400000))
  625. | 0x40000000 | (instr & 0x3FFFFF);
  626. put_unaligned_be32(instr, buf + i);
  627. }
  628. }
  629. return i;
  630. }
  631. #endif
  632. /*
  633. * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
  634. * of data that got filtered.
  635. *
  636. * NOTE: This is implemented as a switch statement to avoid using function
  637. * pointers, which could be problematic in the kernel boot code, which must
  638. * avoid pointers to static data (at least on x86).
  639. */
  640. static void bcj_apply(struct xz_dec_bcj *s,
  641. uint8_t *buf, size_t *pos, size_t size)
  642. {
  643. size_t filtered;
  644. buf += *pos;
  645. size -= *pos;
  646. switch (s->type) {
  647. #ifdef XZ_DEC_X86
  648. case BCJ_X86:
  649. filtered = bcj_x86(s, buf, size);
  650. break;
  651. #endif
  652. #ifdef XZ_DEC_POWERPC
  653. case BCJ_POWERPC:
  654. filtered = bcj_powerpc(s, buf, size);
  655. break;
  656. #endif
  657. #ifdef XZ_DEC_IA64
  658. case BCJ_IA64:
  659. filtered = bcj_ia64(s, buf, size);
  660. break;
  661. #endif
  662. #ifdef XZ_DEC_ARM
  663. case BCJ_ARM:
  664. filtered = bcj_arm(s, buf, size);
  665. break;
  666. #endif
  667. #ifdef XZ_DEC_ARMTHUMB
  668. case BCJ_ARMTHUMB:
  669. filtered = bcj_armthumb(s, buf, size);
  670. break;
  671. #endif
  672. #ifdef XZ_DEC_SPARC
  673. case BCJ_SPARC:
  674. filtered = bcj_sparc(s, buf, size);
  675. break;
  676. #endif
  677. default:
  678. /* Never reached but silence compiler warnings. */
  679. filtered = 0;
  680. break;
  681. }
  682. *pos += filtered;
  683. s->pos += filtered;
  684. }
  685. /*
  686. * Flush pending filtered data from temp to the output buffer.
  687. * Move the remaining mixture of possibly filtered and unfiltered
  688. * data to the beginning of temp.
  689. */
  690. static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
  691. {
  692. size_t copy_size;
  693. copy_size = minof(s->temp.filtered, b->out_size - b->out_pos);
  694. memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
  695. b->out_pos += copy_size;
  696. s->temp.filtered -= copy_size;
  697. s->temp.size -= copy_size;
  698. memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
  699. }
  700. /*
  701. * The BCJ filter functions are primitive in sense that they process the
  702. * data in chunks of 1-16 bytes. To hide this issue, this function does
  703. * some buffering.
  704. */
  705. enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
  706. struct xz_dec_lzma2 *lzma2,
  707. struct xz_buf *b)
  708. {
  709. size_t out_start;
  710. /*
  711. * Flush pending already filtered data to the output buffer. Return
  712. * immediatelly if we couldn't flush everything, or if the next
  713. * filter in the chain had already returned XZ_STREAM_END.
  714. */
  715. if (s->temp.filtered > 0) {
  716. bcj_flush(s, b);
  717. if (s->temp.filtered > 0)
  718. return XZ_OK;
  719. if (s->ret == XZ_STREAM_END)
  720. return XZ_STREAM_END;
  721. }
  722. /*
  723. * If we have more output space than what is currently pending in
  724. * temp, copy the unfiltered data from temp to the output buffer
  725. * and try to fill the output buffer by decoding more data from the
  726. * next filter in the chain. Apply the BCJ filter on the new data
  727. * in the output buffer. If everything cannot be filtered, copy it
  728. * to temp and rewind the output buffer position accordingly.
  729. *
  730. * This needs to be always run when temp.size == 0 to handle a special
  731. * case where the output buffer is full and the next filter has no
  732. * more output coming but hasn't returned XZ_STREAM_END yet.
  733. */
  734. if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
  735. out_start = b->out_pos;
  736. memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
  737. b->out_pos += s->temp.size;
  738. s->ret = xz_dec_lzma2_run(lzma2, b);
  739. if (s->ret != XZ_STREAM_END
  740. && (s->ret != XZ_OK ))
  741. return s->ret;
  742. bcj_apply(s, b->out, &out_start, b->out_pos);
  743. /*
  744. * As an exception, if the next filter returned XZ_STREAM_END,
  745. * we can do that too, since the last few bytes that remain
  746. * unfiltered are meant to remain unfiltered.
  747. */
  748. if (s->ret == XZ_STREAM_END)
  749. return XZ_STREAM_END;
  750. s->temp.size = b->out_pos - out_start;
  751. b->out_pos -= s->temp.size;
  752. memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
  753. /*
  754. * If there wasn't enough input to the next filter to fill
  755. * the output buffer with unfiltered data, there's no point
  756. * to try decoding more data to temp.
  757. */
  758. if (b->out_pos + s->temp.size < b->out_size)
  759. return XZ_OK;
  760. }
  761. /*
  762. * We have unfiltered data in temp. If the output buffer isn't full
  763. * yet, try to fill the temp buffer by decoding more data from the
  764. * next filter. Apply the BCJ filter on temp. Then we hopefully can
  765. * fill the actual output buffer by copying filtered data from temp.
  766. * A mix of filtered and unfiltered data may be left in temp; it will
  767. * be taken care on the next call to this function.
  768. */
  769. if (b->out_pos < b->out_size) {
  770. /* Make b->out{,_pos,_size} temporarily point to s->temp. */
  771. s->out = b->out;
  772. s->out_pos = b->out_pos;
  773. s->out_size = b->out_size;
  774. b->out = s->temp.buf;
  775. b->out_pos = s->temp.size;
  776. b->out_size = sizeof(s->temp.buf);
  777. s->ret = xz_dec_lzma2_run(lzma2, b);
  778. s->temp.size = b->out_pos;
  779. b->out = s->out;
  780. b->out_pos = s->out_pos;
  781. b->out_size = s->out_size;
  782. if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
  783. return s->ret;
  784. bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
  785. /*
  786. * If the next filter returned XZ_STREAM_END, we mark that
  787. * everything is filtered, since the last unfiltered bytes
  788. * of the stream are meant to be left as is.
  789. */
  790. if (s->ret == XZ_STREAM_END)
  791. s->temp.filtered = s->temp.size;
  792. bcj_flush(s, b);
  793. if (s->temp.filtered > 0)
  794. return XZ_OK;
  795. }
  796. return s->ret;
  797. }
  798. enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
  799. {
  800. switch (id) {
  801. #ifdef XZ_DEC_X86
  802. case BCJ_X86:
  803. #endif
  804. #ifdef XZ_DEC_POWERPC
  805. case BCJ_POWERPC:
  806. #endif
  807. #ifdef XZ_DEC_IA64
  808. case BCJ_IA64:
  809. #endif
  810. #ifdef XZ_DEC_ARM
  811. case BCJ_ARM:
  812. #endif
  813. #ifdef XZ_DEC_ARMTHUMB
  814. case BCJ_ARMTHUMB:
  815. #endif
  816. #ifdef XZ_DEC_SPARC
  817. case BCJ_SPARC:
  818. #endif
  819. break;
  820. default:
  821. /* Unsupported Filter ID */
  822. return XZ_OPTIONS_ERROR;
  823. }
  824. s->type = id;
  825. s->ret = XZ_OK;
  826. s->pos = 0;
  827. s->x86_prev_mask = 0;
  828. s->temp.filtered = 0;
  829. s->temp.size = 0;
  830. return XZ_OK;
  831. }
  832. #endif
  833. /*
  834. * LZMA2 decoder
  835. */
  836. // BEGIN xz_lzma2.h
  837. /*
  838. * LZMA2 definitions
  839. *
  840. */
  841. /* Range coder constants */
  842. #define RC_SHIFT_BITS 8
  843. #define RC_TOP_BITS 24
  844. #define RC_TOP_VALUE (1 << RC_TOP_BITS)
  845. #define RC_BIT_MODEL_TOTAL_BITS 11
  846. #define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
  847. #define RC_MOVE_BITS 5
  848. /*
  849. * Maximum number of position states. A position state is the lowest pb
  850. * number of bits of the current uncompressed offset. In some places there
  851. * are different sets of probabilities for different position states.
  852. */
  853. #define POS_STATES_MAX (1 << 4)
/*
 * This enum is used to track which LZMA symbols have occurred most recently
 * and in which order. This information is used to predict the next symbol.
 *
 * Symbols:
 *  - Literal: One 8-bit byte
 *  - Match: Repeat a chunk of data at some distance
 *  - Long repeat: Multi-byte match at a recently seen distance
 *  - Short repeat: One-byte repeat at a recently seen distance
 *
 * The symbol names are in the form STATE_oldest_older_previous. REP means
 * either short or long repeated match, and NONLIT means any non-literal.
 */
enum lzma_state {
	STATE_LIT_LIT,
	STATE_MATCH_LIT_LIT,
	STATE_REP_LIT_LIT,
	STATE_SHORTREP_LIT_LIT,
	STATE_MATCH_LIT,
	STATE_REP_LIT,
	STATE_SHORTREP_LIT,
	STATE_LIT_MATCH,
	STATE_LIT_LONGREP,
	STATE_LIT_SHORTREP,
	STATE_NONLIT_MATCH,
	STATE_NONLIT_REP
};

/* Total number of states */
#define STATES 12

/* The lowest 7 states indicate that the previous state was a literal. */
#define LIT_STATES 7
  885. /* Indicate that the latest symbol was a literal. */
  886. static inline void lzma_state_literal(enum lzma_state *state)
  887. {
  888. if (*state <= STATE_SHORTREP_LIT_LIT)
  889. *state = STATE_LIT_LIT;
  890. else if (*state <= STATE_LIT_SHORTREP)
  891. *state -= 3;
  892. else
  893. *state -= 6;
  894. }
  895. /* Indicate that the latest symbol was a match. */
  896. static inline void lzma_state_match(enum lzma_state *state)
  897. {
  898. *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
  899. }
  900. /* Indicate that the latest state was a long repeated match. */
  901. static inline void lzma_state_long_rep(enum lzma_state *state)
  902. {
  903. *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
  904. }
  905. /* Indicate that the latest symbol was a short match. */
  906. static inline void lzma_state_short_rep(enum lzma_state *state)
  907. {
  908. *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
  909. }
  910. /* Test if the previous symbol was a literal. */
  911. static inline int lzma_state_is_literal(enum lzma_state state)
  912. {
  913. return state < LIT_STATES;
  914. }
  915. /* Each literal coder is divided in three sections:
  916. * - 0x001-0x0FF: Without match byte
  917. * - 0x101-0x1FF: With match byte; match bit is 0
  918. * - 0x201-0x2FF: With match byte; match bit is 1
  919. *
  920. * Match byte is used when the previous LZMA symbol was something else than
  921. * a literal (that is, it was some kind of match).
  922. */
  923. #define LITERAL_CODER_SIZE 0x300
  924. /* Maximum number of literal coders */
  925. #define LITERAL_CODERS_MAX (1 << 4)
  926. /* Minimum length of a match is two bytes. */
  927. #define MATCH_LEN_MIN 2
  928. /* Match length is encoded with 4, 5, or 10 bits.
  929. *
  930. * Length Bits
  931. * 2-9 4 = Choice=0 + 3 bits
  932. * 10-17 5 = Choice=1 + Choice2=0 + 3 bits
  933. * 18-273 10 = Choice=1 + Choice2=1 + 8 bits
  934. */
  935. #define LEN_LOW_BITS 3
  936. #define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
  937. #define LEN_MID_BITS 3
  938. #define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
  939. #define LEN_HIGH_BITS 8
  940. #define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
  941. #define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
  942. /*
  943. * Maximum length of a match is 273 which is a result of the encoding
  944. * described above.
  945. */
  946. #define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
  947. /*
  948. * Different sets of probabilities are used for match distances that have
  949. * very short match length: Lengths of 2, 3, and 4 bytes have a separate
  950. * set of probabilities for each length. The matches with longer length
  951. * use a shared set of probabilities.
  952. */
  953. #define DIST_STATES 4
  954. /*
  955. * Get the index of the appropriate probability array for decoding
  956. * the distance slot.
  957. */
  958. static inline uint32_t lzma_get_dist_state(uint32_t len)
  959. {
  960. return len < DIST_STATES + MATCH_LEN_MIN
  961. ? len - MATCH_LEN_MIN : DIST_STATES - 1;
  962. }
  963. /*
  964. * The highest two bits of a 32-bit match distance are encoded using six bits.
  965. * This six-bit value is called a distance slot. This way encoding a 32-bit
  966. * value takes 6-36 bits, larger values taking more bits.
  967. */
  968. #define DIST_SLOT_BITS 6
  969. #define DIST_SLOTS (1 << DIST_SLOT_BITS)
  970. /* Match distances up to 127 are fully encoded using probabilities. Since
  971. * the highest two bits (distance slot) are always encoded using six bits,
  972. * the distances 0-3 don't need any additional bits to encode, since the
  973. * distance slot itself is the same as the actual distance. DIST_MODEL_START
  974. * indicates the first distance slot where at least one additional bit is
  975. * needed.
  976. */
  977. #define DIST_MODEL_START 4
  978. /*
  979. * Match distances greater than 127 are encoded in three pieces:
  980. * - distance slot: the highest two bits
  981. * - direct bits: 2-26 bits below the highest two bits
  982. * - alignment bits: four lowest bits
  983. *
  984. * Direct bits don't use any probabilities.
  985. *
  986. * The distance slot value of 14 is for distances 128-191.
  987. */
  988. #define DIST_MODEL_END 14
  989. /* Distance slots that indicate a distance <= 127. */
  990. #define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
  991. #define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
  992. /*
  993. * For match distances greater than 127, only the highest two bits and the
  994. * lowest four bits (alignment) is encoded using probabilities.
  995. */
  996. #define ALIGN_BITS 4
  997. #define ALIGN_SIZE (1 << ALIGN_BITS)
  998. #define ALIGN_MASK (ALIGN_SIZE - 1)
  999. /* Total number of all probability variables */
  1000. #define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
  1001. /*
  1002. * LZMA remembers the four most recent match distances. Reusing these
  1003. * distances tends to take less space than re-encoding the actual
  1004. * distance value.
  1005. */
  1006. #define REPS 4
  1007. // END xz_lzma2.h
  1008. /*
  1009. * Range decoder initialization eats the first five bytes of each LZMA chunk.
  1010. */
  1011. #define RC_INIT_BYTES 5
  1012. /*
  1013. * Minimum number of usable input buffer to safely decode one LZMA symbol.
  1014. * The worst case is that we decode 22 bits using probabilities and 26
  1015. * direct bits. This may decode at maximum of 20 bytes of input. However,
  1016. * lzma_main() does an extra normalization before returning, thus we
  1017. * need to put 21 here.
  1018. */
  1019. #define LZMA_IN_REQUIRED 21
/*
 * Dictionary (history buffer)
 *
 * These are always true:
 *    start <= pos <= full <= end
 *    pos <= limit <= end
 *    end == size
 *    size <= size_max
 *    allocated <= size
 *
 * Most of these variables are size_t as a relic of single-call mode,
 * in which the dictionary variables address the actual output
 * buffer directly.
 */
struct dictionary {
	/* Beginning of the history buffer */
	uint8_t *buf;
	/* Old position in buf (before decoding more data) */
	size_t start;
	/* Position in buf */
	size_t pos;
	/*
	 * How full the dictionary is. This is used to detect corrupt input
	 * that would read beyond the beginning of the uncompressed stream.
	 */
	size_t full;
	/* Write limit; we don't write to buf[limit] or later bytes. */
	size_t limit;
	/*
	 * End of the dictionary buffer. This is the same as the
	 * dictionary size.
	 */
	size_t end;
	/*
	 * Size of the dictionary as specified in Block Header. This is used
	 * together with "full" to detect corrupt input that would make us
	 * read beyond the beginning of the uncompressed stream.
	 */
	uint32_t size;
	/* Maximum allowed dictionary size. */
	uint32_t size_max;
	/* Amount of memory currently allocated for the dictionary. */
	uint32_t allocated;
};
/* Range decoder state */
struct rc_dec {
	/* Current width of the range being decoded */
	uint32_t range;
	/* Code value within the current range */
	uint32_t code;
	/*
	 * Number of initializing bytes remaining to be read
	 * by rc_read_init().
	 */
	uint32_t init_bytes_left;
	/*
	 * Buffer from which we read our input. It can be either
	 * temp.buf or the caller-provided input buffer.
	 */
	const uint8_t *in;
	/* Current read position in "in" */
	size_t in_pos;
	/* Position after which rc_limit_exceeded() reports true */
	size_t in_limit;
};
/* Probabilities for a length decoder. */
struct lzma_len_dec {
	/* Probability of match length being at least 10 */
	uint16_t choice;
	/* Probability of match length being at least 18 */
	uint16_t choice2;
	/* Probabilities for match lengths 2-9 */
	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
	/* Probabilities for match lengths 10-17 */
	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
	/* Probabilities for match lengths 18-273 */
	uint16_t high[LEN_HIGH_SYMBOLS];
};
struct lzma_dec {
	/* Distances of latest four matches */
	uint32_t rep0;
	uint32_t rep1;
	uint32_t rep2;
	uint32_t rep3;
	/* Types of the most recently seen LZMA symbols */
	enum lzma_state state;
	/*
	 * Length of a match. This is updated so that dict_repeat can
	 * be called again to finish repeating the whole match.
	 */
	uint32_t len;
	/*
	 * LZMA properties or related bit masks (number of literal
	 * context bits, a mask derived from the number of literal
	 * position bits, and a mask derived from the number of
	 * position bits)
	 */
	uint32_t lc;
	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
	uint32_t pos_mask;         /* (1 << pb) - 1 */
	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
	uint16_t is_match[STATES][POS_STATES_MAX];
	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
	uint16_t is_rep[STATES];
	/*
	 * If 0, distance of a repeated match is rep0.
	 * Otherwise check is_rep1.
	 */
	uint16_t is_rep0[STATES];
	/*
	 * If 0, distance of a repeated match is rep1.
	 * Otherwise check is_rep2.
	 */
	uint16_t is_rep1[STATES];
	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
	uint16_t is_rep2[STATES];
	/*
	 * If 1, the repeated match has length of one byte. Otherwise
	 * the length is decoded from rep_len_decoder.
	 */
	uint16_t is_rep0_long[STATES][POS_STATES_MAX];
	/*
	 * Probability tree for the highest two bits of the match
	 * distance. There is a separate probability tree for match
	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
	 */
	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
	/*
	 * Probability trees for additional bits for match distance
	 * when the distance is in the range [4, 127].
	 */
	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
	/*
	 * Probability tree for the lowest four bits of a match
	 * distance that is equal to or greater than 128.
	 */
	uint16_t dist_align[ALIGN_SIZE];
	/* Length of a normal match */
	struct lzma_len_dec match_len_dec;
	/* Length of a repeated match */
	struct lzma_len_dec rep_len_dec;
	/* Probabilities of literals */
	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
};
struct lzma2_dec {
	/* Position in xz_dec_lzma2_run() (resumable state machine step). */
	enum lzma2_seq {
		SEQ_CONTROL,
		SEQ_UNCOMPRESSED_1,
		SEQ_UNCOMPRESSED_2,
		SEQ_COMPRESSED_0,
		SEQ_COMPRESSED_1,
		SEQ_PROPERTIES,
		SEQ_LZMA_PREPARE,
		SEQ_LZMA_RUN,
		SEQ_COPY
	} sequence;
	/* Next position after decoding the compressed size of the chunk. */
	enum lzma2_seq next_sequence;
	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
	uint32_t uncompressed;
	/*
	 * Compressed size of LZMA chunk or compressed/uncompressed
	 * size of uncompressed chunk (64 KiB at maximum)
	 */
	uint32_t compressed;
	/*
	 * True if dictionary reset is needed. This is false before
	 * the first chunk (LZMA or uncompressed).
	 */
	int need_dict_reset;
	/*
	 * True if new LZMA properties are needed. This is false
	 * before the first LZMA chunk.
	 */
	int need_props;
};
struct xz_dec_lzma2 {
	/*
	 * The order below is important on x86 to reduce code size and
	 * it shouldn't hurt on other platforms. Everything up to and
	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
	 * which allows using smaller instructions to access those
	 * variables. On x86-64, fewer variables fit into the first 128
	 * bytes, but this is still the best order without sacrificing
	 * the readability by splitting the structures.
	 */
	struct rc_dec rc;
	struct dictionary dict;
	struct lzma2_dec lzma2;
	struct lzma_dec lzma;
	/*
	 * Temporary buffer which holds a small number of input bytes
	 * between decoder calls. See lzma2_lzma() for details.
	 */
	struct {
		/* Number of valid bytes currently held in buf */
		uint32_t size;
		uint8_t buf[3 * LZMA_IN_REQUIRED];
	} temp;
};
  1217. /**************
  1218. * Dictionary *
  1219. **************/
  1220. /* Reset the dictionary state. */
  1221. static void dict_reset(struct dictionary *dict)
  1222. {
  1223. dict->start = 0;
  1224. dict->pos = 0;
  1225. dict->limit = 0;
  1226. dict->full = 0;
  1227. }
  1228. /* Set dictionary write limit */
  1229. static void dict_limit(struct dictionary *dict, size_t out_max)
  1230. {
  1231. if (dict->end - dict->pos <= out_max)
  1232. dict->limit = dict->end;
  1233. else
  1234. dict->limit = dict->pos + out_max;
  1235. }
  1236. /* Return true if at least one byte can be written into the dictionary. */
  1237. static inline int dict_has_space(const struct dictionary *dict)
  1238. {
  1239. return dict->pos < dict->limit;
  1240. }
  1241. /*
  1242. * Get a byte from the dictionary at the given distance. The distance is
  1243. * assumed to valid, or as a special case, zero when the dictionary is
  1244. * still empty. This special case is needed for single-call decoding to
  1245. * avoid writing a '\0' to the end of the destination buffer.
  1246. */
  1247. static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
  1248. {
  1249. size_t offset = dict->pos - dist - 1;
  1250. if (dist >= dict->pos)
  1251. offset += dict->end;
  1252. return dict->full > 0 ? dict->buf[offset] : 0;
  1253. }
  1254. /*
  1255. * Put one byte into the dictionary. It is assumed that there is space for it.
  1256. */
  1257. static inline void dict_put(struct dictionary *dict, uint8_t byte)
  1258. {
  1259. dict->buf[dict->pos++] = byte;
  1260. if (dict->full < dict->pos)
  1261. dict->full = dict->pos;
  1262. }
  1263. /*
  1264. * Repeat given number of bytes from the given distance. If the distance is
  1265. * invalid, false is returned. On success, true is returned and *len is
  1266. * updated to indicate how many bytes were left to be repeated.
  1267. */
  1268. static int dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
  1269. {
  1270. size_t back;
  1271. uint32_t left;
  1272. if (dist >= dict->full || dist >= dict->size) return 0;
  1273. left = minof(dict->limit - dict->pos, *len);
  1274. *len -= left;
  1275. back = dict->pos - dist - 1;
  1276. if (dist >= dict->pos)
  1277. back += dict->end;
  1278. do {
  1279. dict->buf[dict->pos++] = dict->buf[back++];
  1280. if (back == dict->end)
  1281. back = 0;
  1282. } while (--left > 0);
  1283. if (dict->full < dict->pos)
  1284. dict->full = dict->pos;
  1285. return 1;
  1286. }
  1287. /* Copy uncompressed data as is from input to dictionary and output buffers. */
  1288. static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
  1289. uint32_t *left)
  1290. {
  1291. size_t copy_size;
  1292. while (*left > 0 && b->in_pos < b->in_size
  1293. && b->out_pos < b->out_size) {
  1294. copy_size = minof(b->in_size - b->in_pos,
  1295. b->out_size - b->out_pos);
  1296. if (copy_size > dict->end - dict->pos)
  1297. copy_size = dict->end - dict->pos;
  1298. if (copy_size > *left)
  1299. copy_size = *left;
  1300. *left -= copy_size;
  1301. memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
  1302. dict->pos += copy_size;
  1303. if (dict->full < dict->pos)
  1304. dict->full = dict->pos;
  1305. if (dict->pos == dict->end)
  1306. dict->pos = 0;
  1307. memcpy(b->out + b->out_pos, b->in + b->in_pos,
  1308. copy_size);
  1309. dict->start = dict->pos;
  1310. b->out_pos += copy_size;
  1311. b->in_pos += copy_size;
  1312. }
  1313. }
  1314. /*
  1315. * Flush pending data from dictionary to b->out. It is assumed that there is
  1316. * enough space in b->out. This is guaranteed because caller uses dict_limit()
  1317. * before decoding data into the dictionary.
  1318. */
  1319. static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
  1320. {
  1321. size_t copy_size = dict->pos - dict->start;
  1322. if (dict->pos == dict->end)
  1323. dict->pos = 0;
  1324. memcpy(b->out + b->out_pos, dict->buf + dict->start,
  1325. copy_size);
  1326. dict->start = dict->pos;
  1327. b->out_pos += copy_size;
  1328. return copy_size;
  1329. }
  1330. /*****************
  1331. * Range decoder *
  1332. *****************/
  1333. /* Reset the range decoder. */
  1334. static void rc_reset(struct rc_dec *rc)
  1335. {
  1336. rc->range = (uint32_t)-1;
  1337. rc->code = 0;
  1338. rc->init_bytes_left = RC_INIT_BYTES;
  1339. }
  1340. /*
  1341. * Read the first five initial bytes into rc->code if they haven't been
  1342. * read already. (Yes, the first byte gets completely ignored.)
  1343. */
  1344. static int rc_read_init(struct rc_dec *rc, struct xz_buf *b)
  1345. {
  1346. while (rc->init_bytes_left > 0) {
  1347. if (b->in_pos == b->in_size) return 0;
  1348. rc->code = (rc->code << 8) + b->in[b->in_pos++];
  1349. --rc->init_bytes_left;
  1350. }
  1351. return 1;
  1352. }
  1353. /* Return true if there may not be enough input for the next decoding loop. */
  1354. static inline int rc_limit_exceeded(const struct rc_dec *rc)
  1355. {
  1356. return rc->in_pos > rc->in_limit;
  1357. }
  1358. /*
  1359. * Return true if it is possible (from point of view of range decoder) that
  1360. * we have reached the end of the LZMA chunk.
  1361. */
  1362. static inline int rc_is_finished(const struct rc_dec *rc)
  1363. {
  1364. return rc->code == 0;
  1365. }
  1366. /* Read the next input byte if needed. */
  1367. static inline void rc_normalize(struct rc_dec *rc)
  1368. {
  1369. if (rc->range < RC_TOP_VALUE) {
  1370. rc->range <<= RC_SHIFT_BITS;
  1371. rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
  1372. }
  1373. }
  1374. /*
  1375. * Decode one bit. In some versions, this function has been splitted in three
  1376. * functions so that the compiler is supposed to be able to more easily avoid
  1377. * an extra branch. In this particular version of the LZMA decoder, this
  1378. * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
  1379. * on x86). Using a non-splitted version results in nicer looking code too.
  1380. *
  1381. * NOTE: This must return an int. Do not make it return a bool or the speed
  1382. * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
  1383. * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
  1384. */
  1385. static inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
  1386. {
  1387. uint32_t bound;
  1388. int bit;
  1389. rc_normalize(rc);
  1390. bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
  1391. if (rc->code < bound) {
  1392. rc->range = bound;
  1393. *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
  1394. bit = 0;
  1395. } else {
  1396. rc->range -= bound;
  1397. rc->code -= bound;
  1398. *prob -= *prob >> RC_MOVE_BITS;
  1399. bit = 1;
  1400. }
  1401. return bit;
  1402. }
  1403. /* Decode a bittree starting from the most significant bit. */
  1404. static inline uint32_t rc_bittree(struct rc_dec *rc,
  1405. uint16_t *probs, uint32_t limit)
  1406. {
  1407. uint32_t symbol = 1;
  1408. do {
  1409. if (rc_bit(rc, &probs[symbol]))
  1410. symbol = (symbol << 1) + 1;
  1411. else
  1412. symbol <<= 1;
  1413. } while (symbol < limit);
  1414. return symbol;
  1415. }
  1416. /* Decode a bittree starting from the least significant bit. */
  1417. static inline void rc_bittree_reverse(struct rc_dec *rc,
  1418. uint16_t *probs,
  1419. uint32_t *dest, uint32_t limit)
  1420. {
  1421. uint32_t symbol = 1;
  1422. uint32_t i = 0;
  1423. do {
  1424. if (rc_bit(rc, &probs[symbol])) {
  1425. symbol = (symbol << 1) + 1;
  1426. *dest += 1 << i;
  1427. } else {
  1428. symbol <<= 1;
  1429. }
  1430. } while (++i < limit);
  1431. }
  1432. /* Decode direct bits (fixed fifty-fifty probability) */
  1433. static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
  1434. {
  1435. uint32_t mask;
  1436. do {
  1437. rc_normalize(rc);
  1438. rc->range >>= 1;
  1439. rc->code -= rc->range;
  1440. mask = (uint32_t)0 - (rc->code >> 31);
  1441. rc->code += rc->range & mask;
  1442. *dest = (*dest << 1) + (mask + 1);
  1443. } while (--limit > 0);
  1444. }
  1445. /********
  1446. * LZMA *
  1447. ********/
  1448. /* Get pointer to literal coder probability array. */
  1449. static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
  1450. {
  1451. uint32_t prev_byte = dict_get(&s->dict, 0);
  1452. uint32_t low = prev_byte >> (8 - s->lzma.lc);
  1453. uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
  1454. return s->lzma.literal[low + high];
  1455. }
/* Decode a literal (one 8-bit byte) */
static void lzma_literal(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	uint32_t symbol;
	uint32_t match_byte;
	uint32_t match_bit;
	uint32_t offset;
	uint32_t i;

	probs = lzma_literal_probs(s);

	if (lzma_state_is_literal(s->lzma.state)) {
		/* Previous symbol was a literal: plain 8-bit bittree decode. */
		symbol = rc_bittree(&s->rc, probs, 0x100);
	} else {
		/*
		 * Previous symbol was a match: use the "matched literal"
		 * coder, which takes the byte at distance rep0 as a
		 * predictor. While the decoded bits agree with the
		 * predictor bits, probabilities from the 0x100/0x200
		 * regions of the array are used; after the first mismatch
		 * offset drops to 0 and plain literal probabilities apply.
		 */
		symbol = 1;
		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
		offset = 0x100;

		do {
			match_bit = match_byte & offset;
			match_byte <<= 1;
			i = offset + match_bit + symbol;

			if (rc_bit(&s->rc, &probs[i])) {
				symbol = (symbol << 1) + 1;
				/* Stay in the matched context only while the
				 * decoded bit equals the predictor bit. */
				offset &= match_bit;
			} else {
				symbol <<= 1;
				offset &= ~match_bit;
			}
		} while (symbol < 0x100);
	}

	/* symbol now holds 0x100 | byte; the cast keeps the low 8 bits. */
	dict_put(&s->dict, (uint8_t)symbol);
	lzma_state_literal(&s->lzma.state);
}
/* Decode the length of the match into s->lzma.len. */
static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
		     uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t limit;

	/*
	 * Up to two choice bits select the low, mid, or high length
	 * bittree. The base stored in s->lzma.len is MATCH_LEN_MIN plus
	 * the sizes of the ranges that were skipped over; the bittree
	 * result minus limit is the offset within the selected range.
	 */
	if (!rc_bit(&s->rc, &l->choice)) {
		probs = l->low[pos_state];
		limit = LEN_LOW_SYMBOLS;
		s->lzma.len = MATCH_LEN_MIN;
	} else {
		if (!rc_bit(&s->rc, &l->choice2)) {
			probs = l->mid[pos_state];
			limit = LEN_MID_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
		} else {
			probs = l->high;
			limit = LEN_HIGH_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
				      + LEN_MID_SYMBOLS;
		}
	}

	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
}
/* Decode a match. The distance will be stored in s->lzma.rep0. */
static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t dist_slot;
	uint32_t limit;

	lzma_state_match(&s->lzma.state);

	/* A new distance is coming: age the recent-distance history. */
	s->lzma.rep3 = s->lzma.rep2;
	s->lzma.rep2 = s->lzma.rep1;
	s->lzma.rep1 = s->lzma.rep0;

	lzma_len(s, &s->lzma.match_len_dec, pos_state);

	/* The distance slot determines how the rest of the distance
	 * is coded; the slot coder is selected by the match length. */
	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;

	if (dist_slot < DIST_MODEL_START) {
		/* Small distances are the slot value itself. */
		s->lzma.rep0 = dist_slot;
	} else {
		/* The slot encodes the top two distance bits plus the
		 * count of remaining lower bits. */
		limit = (dist_slot >> 1) - 1;
		s->lzma.rep0 = 2 + (dist_slot & 1);

		if (dist_slot < DIST_MODEL_END) {
			/* Mid-range: the extra bits use adaptive
			 * probabilities, decoded LSB first. */
			s->lzma.rep0 <<= limit;
			probs = s->lzma.dist_special + s->lzma.rep0
				- dist_slot - 1;
			rc_bittree_reverse(&s->rc, probs,
					   &s->lzma.rep0, limit);
		} else {
			/* Large distances: middle bits are fifty-fifty
			 * direct bits, the lowest ALIGN_BITS bits are
			 * adaptive (align coder). */
			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
			s->lzma.rep0 <<= ALIGN_BITS;
			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
					   &s->lzma.rep0, ALIGN_BITS);
		}
	}
}
/*
 * Decode a repeated match. The distance is one of the four most recently
 * seen matches. The distance will be stored in s->lzma.rep0.
 */
static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint32_t tmp;

	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
		/* Distance rep0 is reused unchanged. */
		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
				s->lzma.state][pos_state])) {
			/* "Short rep": a single byte at distance rep0;
			 * no length field follows. */
			lzma_state_short_rep(&s->lzma.state);
			s->lzma.len = 1;
			return;
		}
	} else {
		/* One of rep1/rep2/rep3 is selected and moved to the
		 * front of the recent-distance list. */
		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
			tmp = s->lzma.rep1;
		} else {
			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
				tmp = s->lzma.rep2;
			} else {
				tmp = s->lzma.rep3;
				s->lzma.rep3 = s->lzma.rep2;
			}

			s->lzma.rep2 = s->lzma.rep1;
		}

		s->lzma.rep1 = s->lzma.rep0;
		s->lzma.rep0 = tmp;
	}

	lzma_state_long_rep(&s->lzma.state);
	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
}
/*
 * LZMA decoder core. Returns true on success; false means the input
 * is corrupt (an impossible repeat was requested).
 */
static int lzma_main(struct xz_dec_lzma2 *s)
{
	uint32_t pos_state;

	/*
	 * If the dictionary was reached during the previous call, try to
	 * finish the possibly pending repeat in the dictionary.
	 */
	if (dict_has_space(&s->dict) && s->lzma.len > 0)
		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);

	/*
	 * Decode more LZMA symbols. One iteration may consume up to
	 * LZMA_IN_REQUIRED - 1 bytes.
	 */
	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
		pos_state = s->dict.pos & s->lzma.pos_mask;

		if (!rc_bit(&s->rc, &s->lzma.is_match[
				s->lzma.state][pos_state])) {
			/* Bit 0: a single literal byte. */
			lzma_literal(s);
		} else {
			/* Bit 1: a match; the next bit selects between a
			 * repeated distance and a newly coded one. */
			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
				lzma_rep_match(s, pos_state);
			else
				lzma_match(s, pos_state);

			/* A failing dict_repeat() means the coded match is
			 * invalid for the current dictionary contents; the
			 * caller turns this into XZ_DATA_ERROR. */
			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
				return 0;
		}
	}

	/*
	 * Having the range decoder always normalized when we are outside
	 * this function makes it easier to correctly handle end of the chunk.
	 */
	rc_normalize(&s->rc);
	return 1;
}
  1611. /*
 * Reset the LZMA decoder and range decoder state. Dictionary is not reset
  1613. * here, because LZMA state may be reset without resetting the dictionary.
  1614. */
static void lzma_reset(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	size_t i;

	s->lzma.state = STATE_LIT_LIT;
	s->lzma.rep0 = 0;
	s->lzma.rep1 = 0;
	s->lzma.rep2 = 0;
	s->lzma.rep3 = 0;

	/*
	 * All probabilities are initialized to the same value. This hack
	 * makes the code smaller by avoiding a separate loop for each
	 * probability array.
	 *
	 * This could be optimized so that only the part of the literal
	 * probabilities that is actually required gets initialized. In
	 * the common case we would write 12 KiB less.
	 *
	 * NOTE(review): this assumes all probability arrays are laid out
	 * contiguously in memory starting at is_match[0], with PROBS_TOTAL
	 * covering them all -- confirm against the struct definition.
	 */
	probs = s->lzma.is_match[0];
	for (i = 0; i < PROBS_TOTAL; ++i)
		probs[i] = RC_BIT_MODEL_TOTAL / 2;

	rc_reset(&s->rc);
}
  1638. /*
  1639. * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
  1640. * from the decoded lp and pb values. On success, the LZMA decoder state is
  1641. * reset and true is returned.
  1642. */
  1643. static int lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
  1644. {
  1645. if (props > (4 * 5 + 4) * 9 + 8)
  1646. return 0;
  1647. s->lzma.pos_mask = 0;
  1648. while (props >= 9 * 5) {
  1649. props -= 9 * 5;
  1650. ++s->lzma.pos_mask;
  1651. }
  1652. s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
  1653. s->lzma.literal_pos_mask = 0;
  1654. while (props >= 9) {
  1655. props -= 9;
  1656. ++s->lzma.literal_pos_mask;
  1657. }
  1658. s->lzma.lc = props;
  1659. if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
  1660. return 0;
  1661. s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
  1662. lzma_reset(s);
  1663. return 1;
  1664. }
  1665. /*********
  1666. * LZMA2 *
  1667. *********/
/*
 * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
 * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
 * wrapper function takes care of making the LZMA decoder's assumption safe.
 *
 * As long as there is plenty of input left to be decoded in the current LZMA
 * chunk, we decode directly from the caller-supplied input buffer until
 * there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
 * s->temp.buf, which (hopefully) gets filled on the next call to this
 * function. We decode a few bytes from the temporary buffer so that we can
 * continue decoding from the caller-supplied input buffer again.
 */
static int lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
	size_t in_avail;
	uint32_t tmp;

	in_avail = b->in_size - b->in_pos;
	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
		/* Top up temp.buf, but never beyond 2 * LZMA_IN_REQUIRED,
		 * never past the end of the compressed chunk, and never
		 * past the available input. */
		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
		if (tmp > s->lzma2.compressed - s->temp.size)
			tmp = s->lzma2.compressed - s->temp.size;
		if (tmp > in_avail)
			tmp = in_avail;

		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);

		if (s->temp.size + tmp == s->lzma2.compressed) {
			/* The whole rest of the chunk fits in temp.buf;
			 * zero the tail so any read-ahead sees zeros. */
			memset(s->temp.buf + s->temp.size + tmp, 0,
			       sizeof(s->temp.buf)
			       - s->temp.size - tmp);
			s->rc.in_limit = s->temp.size + tmp;
		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
			/* Still too little to decode safely: stash the
			 * bytes and wait for more input. */
			s->temp.size += tmp;
			b->in_pos += tmp;
			return 1;
		} else {
			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
		}

		/* Decode from temp.buf. */
		s->rc.in = s->temp.buf;
		s->rc.in_pos = 0;

		/* Reading past the copied bytes means corrupt input. */
		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
			return 0;

		s->lzma2.compressed -= s->rc.in_pos;

		if (s->rc.in_pos < s->temp.size) {
			/* Decoding stopped inside the previously stashed
			 * data; slide the unread part to the front. */
			s->temp.size -= s->rc.in_pos;
			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
				s->temp.size);
			return 1;
		}

		/* temp.buf fully consumed; the extra bytes decoded came
		 * from b->in, so advance the caller's position. */
		b->in_pos += s->rc.in_pos - s->temp.size;
		s->temp.size = 0;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail >= LZMA_IN_REQUIRED) {
		/* Plenty of input: decode directly from b->in. */
		s->rc.in = b->in;
		s->rc.in_pos = b->in_pos;

		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
		else
			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;

		if (!lzma_main(s))
			return 0;

		in_avail = s->rc.in_pos - b->in_pos;
		/* Consuming more than the chunk holds means corruption. */
		if (in_avail > s->lzma2.compressed)
			return 0;

		s->lzma2.compressed -= in_avail;
		b->in_pos = s->rc.in_pos;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail < LZMA_IN_REQUIRED) {
		/* Save the short tail of this chunk for the next call. */
		if (in_avail > s->lzma2.compressed)
			in_avail = s->lzma2.compressed;

		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
		s->temp.size = in_avail;
		b->in_pos += in_avail;
	}

	return 1;
}
/*
 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
 * decoding or copying of uncompressed chunks to other functions.
 */
enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
			     struct xz_buf *b)
{
	uint32_t tmp;

	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
		switch (s->lzma2.sequence) {
		case SEQ_CONTROL:
			/*
			 * LZMA2 control byte
			 *
			 * Exact values:
			 *   0x00  End marker
			 *   0x01  Dictionary reset followed by
			 *         an uncompressed chunk
			 *   0x02  Uncompressed chunk (no dictionary reset)
			 *
			 * Highest three bits (s->control & 0xE0):
			 *   0xE0  Dictionary reset, new properties and state
			 *         reset, followed by LZMA compressed chunk
			 *   0xC0  New properties and state reset, followed
			 *         by LZMA compressed chunk (no dictionary
			 *         reset)
			 *   0xA0  State reset using old properties,
			 *         followed by LZMA compressed chunk (no
			 *         dictionary reset)
			 *   0x80  LZMA chunk (no dictionary or state reset)
			 *
			 * For LZMA compressed chunks, the lowest five bits
			 * (s->control & 0x1F) are the highest bits of the
			 * uncompressed size (bits 16-20).
			 *
			 * A new LZMA2 stream must begin with a dictionary
			 * reset. The first LZMA chunk must set new
			 * properties and reset the LZMA state.
			 *
			 * Values that don't match anything described above
			 * are invalid and we return XZ_DATA_ERROR.
			 */
			tmp = b->in[b->in_pos++];

			if (tmp == 0x00)
				return XZ_STREAM_END;

			if (tmp >= 0xE0 || tmp == 0x01) {
				s->lzma2.need_props = 1;
				s->lzma2.need_dict_reset = 0;
				dict_reset(&s->dict);
			} else if (s->lzma2.need_dict_reset) {
				return XZ_DATA_ERROR;
			}

			if (tmp >= 0x80) {
				/* Bits 16-20 of the uncompressed size. */
				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;

				if (tmp >= 0xC0) {
					/*
					 * When there are new properties,
					 * state reset is done at
					 * SEQ_PROPERTIES.
					 */
					s->lzma2.need_props = 0;
					s->lzma2.next_sequence
							= SEQ_PROPERTIES;
				} else if (s->lzma2.need_props) {
					return XZ_DATA_ERROR;
				} else {
					s->lzma2.next_sequence
							= SEQ_LZMA_PREPARE;
					if (tmp >= 0xA0)
						lzma_reset(s);
				}
			} else {
				if (tmp > 0x02)
					return XZ_DATA_ERROR;

				s->lzma2.sequence = SEQ_COMPRESSED_0;
				s->lzma2.next_sequence = SEQ_COPY;
			}

			break;

		case SEQ_UNCOMPRESSED_1:
			/* Middle byte (bits 8-15) of the uncompressed size. */
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
			break;

		case SEQ_UNCOMPRESSED_2:
			/* Low byte; the stored size is one less than real. */
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = SEQ_COMPRESSED_0;
			break;

		case SEQ_COMPRESSED_0:
			/* High byte of the compressed chunk size. */
			s->lzma2.compressed
					= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_COMPRESSED_1;
			break;

		case SEQ_COMPRESSED_1:
			/* Low byte; the stored size is one less than real. */
			s->lzma2.compressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = s->lzma2.next_sequence;
			break;

		case SEQ_PROPERTIES:
			if (!lzma_props(s, b->in[b->in_pos++]))
				return XZ_DATA_ERROR;

			s->lzma2.sequence = SEQ_LZMA_PREPARE;

			/* Fall through */

		case SEQ_LZMA_PREPARE:
			if (s->lzma2.compressed < RC_INIT_BYTES)
				return XZ_DATA_ERROR;

			if (!rc_read_init(&s->rc, b))
				return XZ_OK;

			s->lzma2.compressed -= RC_INIT_BYTES;
			s->lzma2.sequence = SEQ_LZMA_RUN;

			/* Fall through */

		case SEQ_LZMA_RUN:
			/*
			 * Set dictionary limit to indicate how much we want
			 * to be encoded at maximum. Decode new data into the
			 * dictionary. Flush the new data from dictionary to
			 * b->out. Check if we finished decoding this chunk.
			 * In case the dictionary got full but we didn't fill
			 * the output buffer yet, we may run this loop
			 * multiple times without changing s->lzma2.sequence.
			 */
			dict_limit(&s->dict, minof(b->out_size - b->out_pos,
					s->lzma2.uncompressed));
			if (!lzma2_lzma(s, b))
				return XZ_DATA_ERROR;

			s->lzma2.uncompressed -= dict_flush(&s->dict, b);

			if (s->lzma2.uncompressed == 0) {
				/* Chunk finished: all compressed bytes must
				 * be consumed, no repeat may be pending, and
				 * the range coder must have ended cleanly. */
				if (s->lzma2.compressed > 0 || s->lzma.len > 0
						|| !rc_is_finished(&s->rc))
					return XZ_DATA_ERROR;

				rc_reset(&s->rc);
				s->lzma2.sequence = SEQ_CONTROL;

			} else if (b->out_pos == b->out_size
					|| (b->in_pos == b->in_size
						&& s->temp.size
						< s->lzma2.compressed)) {
				/* No progress possible: output is full, or
				 * input is exhausted and temp doesn't hold
				 * the whole chunk remainder. */
				return XZ_OK;
			}

			break;

		case SEQ_COPY:
			/* Uncompressed chunk: bytes go straight into the
			 * dictionary (and to b->out). */
			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
			if (s->lzma2.compressed > 0)
				return XZ_OK;

			s->lzma2.sequence = SEQ_CONTROL;
			break;
		}
	}

	return XZ_OK;
}
  1891. struct xz_dec_lzma2 *xz_dec_lzma2_create(uint32_t dict_max)
  1892. {
  1893. struct xz_dec_lzma2 *s = malloc(sizeof(*s));
  1894. if (s == NULL)
  1895. return NULL;
  1896. s->dict.size_max = dict_max;
  1897. s->dict.buf = NULL;
  1898. s->dict.allocated = 0;
  1899. return s;
  1900. }
  1901. enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
  1902. {
  1903. /* This limits dictionary size to 3 GiB to keep parsing simpler. */
  1904. if (props > 39)
  1905. return XZ_OPTIONS_ERROR;
  1906. s->dict.size = 2 + (props & 1);
  1907. s->dict.size <<= (props >> 1) + 11;
  1908. if (s->dict.size > s->dict.size_max)
  1909. return XZ_MEMLIMIT_ERROR;
  1910. s->dict.end = s->dict.size;
  1911. if (s->dict.allocated < s->dict.size) {
  1912. free(s->dict.buf);
  1913. s->dict.buf = malloc(s->dict.size);
  1914. if (s->dict.buf == NULL) {
  1915. s->dict.allocated = 0;
  1916. return XZ_MEM_ERROR;
  1917. }
  1918. }
  1919. s->lzma.len = 0;
  1920. s->lzma2.sequence = SEQ_CONTROL;
  1921. s->lzma2.need_dict_reset = 1;
  1922. s->temp.size = 0;
  1923. return XZ_OK;
  1924. }
  1925. /*
  1926. * .xz Stream decoder
  1927. */
  1928. // BEGIN xz_stream.h
  1929. /*
  1930. * Definitions for handling the .xz file format
  1931. */
  1932. /*
  1933. * See the .xz file format specification at
  1934. * http://tukaani.org/xz/xz-file-format.txt
  1935. * to understand the container format.
  1936. */
/* Fixed size (in bytes) of both the Stream Header and Stream Footer */
#define STREAM_HEADER_SIZE 12

/*
 * Stream Header magic: 0xFD "7zXZ" plus the string literal's implicit
 * terminating '\0', which is the sixth byte of the magic (hence
 * HEADER_MAGIC_SIZE is 6).
 */
#define HEADER_MAGIC "\3757zXZ"
#define HEADER_MAGIC_SIZE 6

/* Stream Footer magic (the last two bytes of a .xz file) */
#define FOOTER_MAGIC "YZ"
#define FOOTER_MAGIC_SIZE 2

/*
 * Variable-length integer can hold a 63-bit unsigned integer or a special
 * value indicating that the value is unknown.
 *
 * Experimental: vli_type can be defined to uint32_t to save a few bytes
 * in code size (no effect on speed). Doing so limits the uncompressed and
 * compressed size of the file to less than 256 MiB and may also weaken
 * error detection slightly.
 */
typedef uint64_t vli_type;

/* Largest representable VLI value (63 bits set) */
#define VLI_MAX ((vli_type)-1 / 2)
/* Sentinel meaning "size not present / not known" */
#define VLI_UNKNOWN ((vli_type)-1)

/* Maximum encoded size of a VLI */
#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)

/* Integrity Check types */
enum xz_check {
	XZ_CHECK_NONE = 0,
	XZ_CHECK_CRC32 = 1,
	XZ_CHECK_CRC64 = 4,
	XZ_CHECK_SHA256 = 10
};

/* Maximum possible Check ID */
#define XZ_CHECK_MAX 15
// END xz_stream.h

/* True when the stream's integrity check is CRC64 */
#define IS_CRC64(check_type) ((check_type) == XZ_CHECK_CRC64)
/* Hash used to validate the Index field */
struct xz_dec_hash {
	vli_type unpadded;	/* Running sum of Unpadded Sizes */
	vli_type uncompressed;	/* Running sum of Uncompressed Sizes */
	uint32_t crc32;		/* Chained CRC32 over this struct itself */
};
/* Main .xz stream decoder state */
struct xz_dec {
	/* Position in dec_main() */
	enum {
		SEQ_STREAM_HEADER,
		SEQ_BLOCK_START,
		SEQ_BLOCK_HEADER,
		SEQ_BLOCK_UNCOMPRESS,
		SEQ_BLOCK_PADDING,
		SEQ_BLOCK_CHECK,
		SEQ_INDEX,
		SEQ_INDEX_PADDING,
		SEQ_INDEX_CRC32,
		SEQ_STREAM_FOOTER
	} sequence;

	/* Position in variable-length integers and Check fields */
	uint32_t pos;

	/* Variable-length integer decoded by dec_vli() */
	vli_type vli;

	/* Saved in_pos and out_pos */
	size_t in_start;
	size_t out_start;

	/* CRC32 or CRC64 value in Block or CRC32 value in Index */
	uint64_t crc;

	/* Type of the integrity check calculated from uncompressed data */
	enum xz_check check_type;

	/*
	 * True if the next call to xz_dec_run() is allowed to return
	 * XZ_BUF_ERROR.
	 */
	int allow_buf_error;

	/* Information stored in Block Header */
	struct {
		/*
		 * Value stored in the Compressed Size field, or
		 * VLI_UNKNOWN if Compressed Size is not present.
		 */
		vli_type compressed;

		/*
		 * Value stored in the Uncompressed Size field, or
		 * VLI_UNKNOWN if Uncompressed Size is not present.
		 */
		vli_type uncompressed;

		/* Size of the Block Header field */
		uint32_t size;
	} block_header;

	/* Information collected when decoding Blocks */
	struct {
		/* Observed compressed size of the current Block */
		vli_type compressed;

		/* Observed uncompressed size of the current Block */
		vli_type uncompressed;

		/* Number of Blocks decoded so far */
		vli_type count;

		/*
		 * Hash calculated from the Block sizes. This is used to
		 * validate the Index field.
		 */
		struct xz_dec_hash hash;
	} block;

	/* Variables needed when verifying the Index field */
	struct {
		/* Position in dec_index() */
		enum {
			SEQ_INDEX_COUNT,
			SEQ_INDEX_UNPADDED,
			SEQ_INDEX_UNCOMPRESSED
		} sequence;

		/* Size of the Index in bytes */
		vli_type size;

		/* Number of Records (matches block.count in valid files) */
		vli_type count;

		/*
		 * Hash calculated from the Records (matches block.hash in
		 * valid files).
		 */
		struct xz_dec_hash hash;
	} index;

	/*
	 * Temporary buffer needed to hold Stream Header, Block Header,
	 * and Stream Footer. The Block Header is the biggest (1 KiB)
	 * so we reserve space according to that. buf[] has to be aligned
	 * to a multiple of four bytes; the size_t variables before it
	 * should guarantee this.
	 */
	struct {
		size_t pos;
		size_t size;
		uint8_t buf[1024];
	} temp;

	/* LZMA2 decoder for the Blocks' compressed data */
	struct xz_dec_lzma2 *lzma2;

#ifdef XZ_DEC_BCJ
	/* Optional BCJ filter decoder (used when a Block has two filters) */
	struct xz_dec_bcj *bcj;
	/* True while the current Block's data goes through the BCJ filter */
	int bcj_active;
#endif
};
/*
 * Sizes of the Check field with different Check IDs. Indexed by Check ID
 * (0-15); among the supported IDs, 1 is CRC32 (4 bytes), 4 is CRC64
 * (8 bytes), and 10 is SHA-256 (32 bytes).
 */
static const uint8_t check_sizes[16] = {
	0,		/* ID 0: no check */
	4, 4, 4,	/* IDs 1-3 */
	8, 8, 8,	/* IDs 4-6 */
	16, 16, 16,	/* IDs 7-9 */
	32, 32, 32,	/* IDs 10-12 */
	64, 64, 64	/* IDs 13-15 */
};
  2077. /*
  2078. * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
  2079. * must have set s->temp.pos to indicate how much data we are supposed
  2080. * to copy into s->temp.buf. Return true once s->temp.pos has reached
  2081. * s->temp.size.
  2082. */
  2083. static int fill_temp(struct xz_dec *s, struct xz_buf *b)
  2084. {
  2085. size_t copy_size = minof(b->in_size - b->in_pos, s->temp.size - s->temp.pos);
  2086. memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
  2087. b->in_pos += copy_size;
  2088. s->temp.pos += copy_size;
  2089. if (s->temp.pos == s->temp.size) {
  2090. s->temp.pos = 0;
  2091. return 1;
  2092. }
  2093. return 0;
  2094. }
/*
 * Decode a variable-length integer (little-endian base-128 encoding).
 * The result accumulates in s->vli; s->pos carries the bit position
 * across calls when the integer is split over multiple input buffers.
 * Returns XZ_STREAM_END when the integer is complete, XZ_OK when more
 * input is needed, or XZ_DATA_ERROR on an invalid encoding.
 */
static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
			   size_t *in_pos, size_t in_size)
{
	uint8_t byte;

	/* s->pos == 0 means we are starting a fresh integer. */
	if (s->pos == 0)
		s->vli = 0;

	while (*in_pos < in_size) {
		byte = in[*in_pos];
		++*in_pos;

		/* Each byte contributes its low seven bits. */
		s->vli |= (vli_type)(byte & 0x7F) << s->pos;

		if ((byte & 0x80) == 0) {
			/* Don't allow non-minimal encodings. */
			if (byte == 0 && s->pos != 0)
				return XZ_DATA_ERROR;

			s->pos = 0;
			return XZ_STREAM_END;
		}

		s->pos += 7;
		/* A valid VLI never takes more than VLI_BYTES_MAX bytes. */
		if (s->pos == 7 * VLI_BYTES_MAX)
			return XZ_DATA_ERROR;
	}

	return XZ_OK;
}
/*
 * Decode the Compressed Data field from a Block. Update and validate
 * the observed compressed and uncompressed sizes of the Block so that
 * they don't exceed the values possibly stored in the Block Header
 * (validation assumes that no integer overflow occurs, since vli_type
 * is normally uint64_t). Update the CRC32 or CRC64 value if presence of
 * the CRC32 or CRC64 field was indicated in Stream Header.
 *
 * Once the decoding is finished, validate that the observed sizes match
 * the sizes possibly stored in the Block Header. Update the hash and
 * Block count, which are later used to validate the Index field.
 */
static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	s->in_start = b->in_pos;
	s->out_start = b->out_pos;

#ifdef XZ_DEC_BCJ
	if (s->bcj_active)
		ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
	else
#endif
		ret = xz_dec_lzma2_run(s->lzma2, b);

	s->block.compressed += b->in_pos - s->in_start;
	s->block.uncompressed += b->out_pos - s->out_start;

	/*
	 * There is no need to separately check for VLI_UNKNOWN, since
	 * the observed sizes are always smaller than VLI_UNKNOWN.
	 */
	if (s->block.compressed > s->block_header.compressed
			|| s->block.uncompressed
			> s->block_header.uncompressed)
		return XZ_DATA_ERROR;

	if (s->check_type == XZ_CHECK_CRC32)
		s->crc = xz_crc32(b->out + s->out_start,
				b->out_pos - s->out_start, s->crc);
	else if (s->check_type == XZ_CHECK_CRC64) {
		/*
		 * Inlined table-driven CRC64 over the newly produced
		 * output bytes. s->crc stores the non-inverted running
		 * value, so invert around the byte loop.
		 */
		s->crc = ~(s->crc);
		size_t size = b->out_pos - s->out_start;
		uint8_t *buf = b->out + s->out_start;
		while (size) {
			s->crc = xz_crc64_table[*buf++ ^ (s->crc & 0xFF)] ^ (s->crc >> 8);
			--size;
		}
		s->crc=~(s->crc);
	}

	if (ret == XZ_STREAM_END) {
		/* The observed sizes must match any sizes declared in
		 * the Block Header. */
		if (s->block_header.compressed != VLI_UNKNOWN
				&& s->block_header.compressed
				!= s->block.compressed)
			return XZ_DATA_ERROR;

		if (s->block_header.uncompressed != VLI_UNKNOWN
				&& s->block_header.uncompressed
				!= s->block.uncompressed)
			return XZ_DATA_ERROR;

		/* Unpadded Size = Block Header + Compressed Data + Check. */
		s->block.hash.unpadded += s->block_header.size
				+ s->block.compressed;

		s->block.hash.unpadded += check_sizes[s->check_type];

		s->block.hash.uncompressed += s->block.uncompressed;
		s->block.hash.crc32 = xz_crc32(
				(const uint8_t *)&s->block.hash,
				sizeof(s->block.hash), s->block.hash.crc32);

		++s->block.count;
	}

	return ret;
}
  2185. /* Update the Index size and the CRC32 value. */
  2186. static void index_update(struct xz_dec *s, const struct xz_buf *b)
  2187. {
  2188. size_t in_used = b->in_pos - s->in_start;
  2189. s->index.size += in_used;
  2190. s->crc = xz_crc32(b->in + s->in_start, in_used, s->crc);
  2191. }
/*
 * Decode the Number of Records, Unpadded Size, and Uncompressed Size
 * fields from the Index field. That is, Index Padding and CRC32 are not
 * decoded by this function.
 *
 * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
 * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
 */
static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	do {
		ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
		if (ret != XZ_STREAM_END) {
			/* VLI incomplete (or corrupt): account for the
			 * consumed input before returning. */
			index_update(s, b);
			return ret;
		}

		switch (s->index.sequence) {
		case SEQ_INDEX_COUNT:
			s->index.count = s->vli;

			/*
			 * Validate that the Number of Records field
			 * indicates the same number of Records as
			 * there were Blocks in the Stream.
			 */
			if (s->index.count != s->block.count)
				return XZ_DATA_ERROR;

			s->index.sequence = SEQ_INDEX_UNPADDED;
			break;

		case SEQ_INDEX_UNPADDED:
			s->index.hash.unpadded += s->vli;
			s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
			break;

		case SEQ_INDEX_UNCOMPRESSED:
			s->index.hash.uncompressed += s->vli;
			/* Fold the completed Record into the running hash
			 * (matched against block.hash in valid files). */
			s->index.hash.crc32 = xz_crc32(
					(const uint8_t *)&s->index.hash,
					sizeof(s->index.hash),
					s->index.hash.crc32);
			--s->index.count;
			s->index.sequence = SEQ_INDEX_UNPADDED;
			break;
		}
	} while (s->index.count > 0);

	return XZ_STREAM_END;
}
  2238. /*
  2239. * Validate that the next four or eight input bytes match the value
  2240. * of s->crc. s->pos must be zero when starting to validate the first byte.
  2241. * The "bits" argument allows using the same code for both CRC32 and CRC64.
  2242. */
  2243. static enum xz_ret crc_validate(struct xz_dec *s, struct xz_buf *b,
  2244. uint32_t bits)
  2245. {
  2246. do {
  2247. if (b->in_pos == b->in_size)
  2248. return XZ_OK;
  2249. if (((s->crc >> s->pos) & 0xFF) != b->in[b->in_pos++])
  2250. return XZ_DATA_ERROR;
  2251. s->pos += 8;
  2252. } while (s->pos < bits);
  2253. s->crc = 0;
  2254. s->pos = 0;
  2255. return XZ_STREAM_END;
  2256. }
  2257. /*
  2258. * Skip over the Check field when the Check ID is not supported.
  2259. * Returns true once the whole Check field has been skipped over.
  2260. */
  2261. static int check_skip(struct xz_dec *s, struct xz_buf *b)
  2262. {
  2263. while (s->pos < check_sizes[s->check_type]) {
  2264. if (b->in_pos == b->in_size) return 0;
  2265. ++b->in_pos;
  2266. ++s->pos;
  2267. }
  2268. s->pos = 0;
  2269. return 1;
  2270. }
/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
static enum xz_ret dec_stream_header(struct xz_dec *s)
{
	if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
		return XZ_FORMAT_ERROR;

	/* The two Stream Flags bytes are protected by a CRC32. */
	if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
			!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
		return XZ_DATA_ERROR;

	/* The first Stream Flags byte must be zero. */
	if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
		return XZ_OPTIONS_ERROR;

	/*
	 * Of integrity checks, we support none (Check ID = 0),
	 * CRC32 (Check ID = 1), and optionally CRC64 (Check ID = 4).
	 * However, if XZ_DEC_ANY_CHECK is defined, we will accept other
	 * check types too, but then the check won't be verified and
	 * a warning (XZ_UNSUPPORTED_CHECK) will be given.
	 */
	s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];

	if (s->check_type > XZ_CHECK_MAX)
		return XZ_OPTIONS_ERROR;

	if (s->check_type > XZ_CHECK_CRC32 && !IS_CRC64(s->check_type))
		return XZ_UNSUPPORTED_CHECK;

	return XZ_OK;
}
/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
static enum xz_ret dec_stream_footer(struct xz_dec *s)
{
	if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
		return XZ_DATA_ERROR;

	/* Bytes 4-9 (Backward Size and Stream Flags) are CRC32-protected. */
	if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
		return XZ_DATA_ERROR;

	/*
	 * Validate Backward Size. Note that we never added the size of the
	 * Index CRC32 field to s->index.size, thus we use s->index.size / 4
	 * instead of s->index.size / 4 - 1.
	 */
	if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
		return XZ_DATA_ERROR;

	/* Stream Flags must match those seen in the Stream Header. */
	if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
		return XZ_DATA_ERROR;

	/*
	 * Use XZ_STREAM_END instead of XZ_OK to be more convenient
	 * for the caller.
	 */
	return XZ_STREAM_END;
}
/* Decode the Block Header and initialize the filter chain. */
static enum xz_ret dec_block_header(struct xz_dec *s)
{
	enum xz_ret ret;

	/*
	 * Validate the CRC32. We know that the temp buffer is at least
	 * eight bytes so this is safe.
	 */
	s->temp.size -= 4;
	if (xz_crc32(s->temp.buf, s->temp.size, 0)
			!= get_le32(s->temp.buf + s->temp.size))
		return XZ_DATA_ERROR;

	/* Start parsing after the first two bytes; buf[1] holds the
	 * Block Flags examined below. */
	s->temp.pos = 2;

	/*
	 * Catch unsupported Block Flags. We support only one or two filters
	 * in the chain, so we catch that with the same test.
	 */
#ifdef XZ_DEC_BCJ
	if (s->temp.buf[1] & 0x3E)
#else
	if (s->temp.buf[1] & 0x3F)
#endif
		return XZ_OPTIONS_ERROR;

	/* Compressed Size */
	if (s->temp.buf[1] & 0x40) {
		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
					!= XZ_STREAM_END)
			return XZ_DATA_ERROR;

		s->block_header.compressed = s->vli;
	} else {
		s->block_header.compressed = VLI_UNKNOWN;
	}

	/* Uncompressed Size */
	if (s->temp.buf[1] & 0x80) {
		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
				!= XZ_STREAM_END)
			return XZ_DATA_ERROR;

		s->block_header.uncompressed = s->vli;
	} else {
		s->block_header.uncompressed = VLI_UNKNOWN;
	}

#ifdef XZ_DEC_BCJ
	/* If there are two filters, the first one must be a BCJ filter. */
	s->bcj_active = s->temp.buf[1] & 0x01;
	if (s->bcj_active) {
		if (s->temp.size - s->temp.pos < 2)
			return XZ_OPTIONS_ERROR;

		ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
		if (ret != XZ_OK)
			return ret;

		/*
		 * We don't support custom start offset,
		 * so Size of Properties must be zero.
		 */
		if (s->temp.buf[s->temp.pos++] != 0x00)
			return XZ_OPTIONS_ERROR;
	}
#endif

	/* Valid Filter Flags always take at least two bytes. */
	if (s->temp.size - s->temp.pos < 2)
		return XZ_DATA_ERROR;

	/* Filter ID = LZMA2 */
	if (s->temp.buf[s->temp.pos++] != 0x21)
		return XZ_OPTIONS_ERROR;

	/* Size of Properties = 1-byte Filter Properties */
	if (s->temp.buf[s->temp.pos++] != 0x01)
		return XZ_OPTIONS_ERROR;

	/* Filter Properties contains LZMA2 dictionary size. */
	if (s->temp.size - s->temp.pos < 1)
		return XZ_DATA_ERROR;

	ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
	if (ret != XZ_OK)
		return ret;

	/* The rest must be Header Padding. */
	while (s->temp.pos < s->temp.size)
		if (s->temp.buf[s->temp.pos++] != 0x00)
			return XZ_OPTIONS_ERROR;

	s->temp.pos = 0;
	/* Reset the per-Block observed sizes for dec_block(). */
	s->block.compressed = 0;
	s->block.uncompressed = 0;

	return XZ_OK;
}
/*
 * Main decoding state machine. Walks the .xz stream structure:
 * Stream Header, zero or more Blocks (header, data, padding, check),
 * the Index, Index CRC32, and finally the Stream Footer. Each SEQ_*
 * state may return XZ_OK mid-field when input runs out; on the next
 * call decoding resumes from the same state. Non-breaking cases fall
 * through deliberately to continue with the next field immediately.
 */
static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	/*
	 * Store the start position for the case when we are in the middle
	 * of the Index field.
	 */
	s->in_start = b->in_pos;

	for (;;) {
		switch (s->sequence) {
		case SEQ_STREAM_HEADER:
			/*
			 * Stream Header is copied to s->temp, and then
			 * decoded from there. This way if the caller
			 * gives us only little input at a time, we can
			 * still keep the Stream Header decoding code
			 * simple. Similar approach is used in many places
			 * in this file.
			 */
			if (!fill_temp(s, b))
				return XZ_OK;

			/*
			 * If dec_stream_header() returns
			 * XZ_UNSUPPORTED_CHECK, it is still possible
			 * to continue decoding if working in multi-call
			 * mode. Thus, update s->sequence before calling
			 * dec_stream_header().
			 */
			s->sequence = SEQ_BLOCK_START;

			ret = dec_stream_header(s);
			if (ret != XZ_OK)
				return ret;

			/* fall through */

		case SEQ_BLOCK_START:
			/* We need one byte of input to continue. */
			if (b->in_pos == b->in_size)
				return XZ_OK;

			/* See if this is the beginning of the Index field. */
			if (b->in[b->in_pos] == 0) {
				s->in_start = b->in_pos++;
				s->sequence = SEQ_INDEX;
				break;
			}

			/*
			 * Calculate the size of the Block Header and
			 * prepare to decode it. The stored byte encodes
			 * (real size / 4) - 1; fill_temp() will consume
			 * this byte again as part of the header.
			 */
			s->block_header.size
				= ((uint32_t)b->in[b->in_pos] + 1) * 4;

			s->temp.size = s->block_header.size;
			s->temp.pos = 0;
			s->sequence = SEQ_BLOCK_HEADER;

			/* fall through */

		case SEQ_BLOCK_HEADER:
			if (!fill_temp(s, b))
				return XZ_OK;

			ret = dec_block_header(s);
			if (ret != XZ_OK)
				return ret;

			s->sequence = SEQ_BLOCK_UNCOMPRESS;

			/* fall through */

		case SEQ_BLOCK_UNCOMPRESS:
			ret = dec_block(s, b);
			if (ret != XZ_STREAM_END)
				return ret;

			s->sequence = SEQ_BLOCK_PADDING;

			/* fall through */

		case SEQ_BLOCK_PADDING:
			/*
			 * Size of Compressed Data + Block Padding
			 * must be a multiple of four. We don't need
			 * s->block.compressed for anything else
			 * anymore, so we use it here to test the size
			 * of the Block Padding field.
			 */
			while (s->block.compressed & 3) {
				if (b->in_pos == b->in_size)
					return XZ_OK;

				if (b->in[b->in_pos++] != 0)
					return XZ_DATA_ERROR;

				++s->block.compressed;
			}

			s->sequence = SEQ_BLOCK_CHECK;

			/* fall through */

		case SEQ_BLOCK_CHECK:
			/*
			 * Dispatch on the integrity-check type from the
			 * Stream Header: validate CRC32/CRC64, or skip
			 * over an unsupported check's bytes.
			 */
			if (s->check_type == XZ_CHECK_CRC32) {
				ret = crc_validate(s, b, 32);
				if (ret != XZ_STREAM_END)
					return ret;
			}
			else if (IS_CRC64(s->check_type)) {
				ret = crc_validate(s, b, 64);
				if (ret != XZ_STREAM_END)
					return ret;
			}
			else if (!check_skip(s, b)) {
				return XZ_OK;
			}

			s->sequence = SEQ_BLOCK_START;
			break;

		case SEQ_INDEX:
			ret = dec_index(s, b);
			if (ret != XZ_STREAM_END)
				return ret;

			s->sequence = SEQ_INDEX_PADDING;

			/* fall through */

		case SEQ_INDEX_PADDING:
			/* Pad the Index field to a multiple of four bytes. */
			while ((s->index.size + (b->in_pos - s->in_start))
					& 3) {
				if (b->in_pos == b->in_size) {
					index_update(s, b);
					return XZ_OK;
				}

				if (b->in[b->in_pos++] != 0)
					return XZ_DATA_ERROR;
			}

			/* Finish the CRC32 value and Index size. */
			index_update(s, b);

			/* Compare the hashes to validate the Index field. */
			if (!memeq(&s->block.hash, &s->index.hash,
					sizeof(s->block.hash)))
				return XZ_DATA_ERROR;

			s->sequence = SEQ_INDEX_CRC32;

			/* fall through */

		case SEQ_INDEX_CRC32:
			ret = crc_validate(s, b, 32);
			if (ret != XZ_STREAM_END)
				return ret;

			s->temp.size = STREAM_HEADER_SIZE;
			s->sequence = SEQ_STREAM_FOOTER;

			/* fall through */

		case SEQ_STREAM_FOOTER:
			if (!fill_temp(s, b))
				return XZ_OK;

			return dec_stream_footer(s);
		}
	}

	/* Never reached */
}
  2530. /*
  2531. * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
  2532. * multi-call and single-call decoding.
  2533. *
  2534. * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
  2535. * are not going to make any progress anymore. This is to prevent the caller
  2536. * from calling us infinitely when the input file is truncated or otherwise
  2537. * corrupt. Since zlib-style API allows that the caller fills the input buffer
  2538. * only when the decoder doesn't produce any new output, we have to be careful
  2539. * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
  2540. * after the second consecutive call to xz_dec_run() that makes no progress.
  2541. *
  2542. * In single-call mode, if we couldn't decode everything and no error
  2543. * occurred, either the input is truncated or the output buffer is too small.
  2544. * Since we know that the last input byte never produces any output, we know
  2545. * that if all the input was consumed and decoding wasn't finished, the file
  2546. * must be corrupt. Otherwise the output buffer has to be too small or the
  2547. * file is corrupt in a way that decoding it produces too big output.
  2548. *
  2549. * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
  2550. * their original values. This is because with some filter chains there won't
  2551. * be any valid uncompressed data in the output buffer unless the decoding
  2552. * actually succeeds (that's the price to pay of using the output buffer as
  2553. * the workspace).
  2554. */
  2555. enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
  2556. {
  2557. size_t in_start;
  2558. size_t out_start;
  2559. enum xz_ret ret;
  2560. in_start = b->in_pos;
  2561. out_start = b->out_pos;
  2562. ret = dec_main(s, b);
  2563. if (ret == XZ_OK && in_start == b->in_pos && out_start == b->out_pos) {
  2564. if (s->allow_buf_error)
  2565. ret = XZ_BUF_ERROR;
  2566. s->allow_buf_error = 1;
  2567. } else {
  2568. s->allow_buf_error = 0;
  2569. }
  2570. return ret;
  2571. }
  2572. struct xz_dec *xz_dec_init(uint32_t dict_max)
  2573. {
  2574. struct xz_dec *s = malloc(sizeof(*s));
  2575. if (!s)
  2576. return NULL;
  2577. #ifdef XZ_DEC_BCJ
  2578. s->bcj = malloc(sizeof(*s->bcj));
  2579. if (!s->bcj)
  2580. goto error_bcj;
  2581. #endif
  2582. s->lzma2 = xz_dec_lzma2_create(dict_max);
  2583. if (s->lzma2 == NULL)
  2584. goto error_lzma2;
  2585. xz_dec_reset(s);
  2586. return s;
  2587. error_lzma2:
  2588. #ifdef XZ_DEC_BCJ
  2589. free(s->bcj);
  2590. error_bcj:
  2591. #endif
  2592. free(s);
  2593. return NULL;
  2594. }
  2595. void xz_dec_reset(struct xz_dec *s)
  2596. {
  2597. s->sequence = SEQ_STREAM_HEADER;
  2598. s->allow_buf_error = 0;
  2599. s->pos = 0;
  2600. s->crc = 0;
  2601. memset(&s->block, 0, sizeof(s->block));
  2602. memset(&s->index, 0, sizeof(s->index));
  2603. s->temp.pos = 0;
  2604. s->temp.size = STREAM_HEADER_SIZE;
  2605. }
  2606. void xz_dec_end(struct xz_dec *s)
  2607. {
  2608. if (s != NULL) {
  2609. free((s->lzma2)->dict.buf);
  2610. free(s->lzma2);
  2611. #ifdef XZ_DEC_BCJ
  2612. free(s->bcj);
  2613. #endif
  2614. free(s);
  2615. }
  2616. }