{
"cells": [
{
"cell_type": "code",
"execution_count": 6,
"id": "a50443d6-fe09-4905-b913-1be5f88c8c03",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import numpy as np\n",
"from tqdm import tqdm\n",
"from sklearn.model_selection import train_test_split\n",
"import torch\n",
"import torch.nn as nn\n",
"from transformers import T5Model"
]
},
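{
"cell_type": "markdown",
"id": "0c6f4e2a-9d1b-4c3e-8f5a-7b2d1e0a3c44",
"metadata": {},
"source": [
"This notebook trains a small denoising autoencoder on the frozen input-embedding table of a pretrained T5 model: random pairs of token embeddings are interpolated, Gaussian noise is added, and an MLP with a LayerNorm bottleneck is trained to reconstruct the clean interpolation."
]
},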
{
"cell_type": "code",
"execution_count": 7,
"id": "4e677034-dc27-4939-8ea2-71fcbb2da57d",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"np_rng = np.random.default_rng(seed=42)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3d139e0a-b8e3-427b-a537-44bc0f14ba46",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 0.09141512, -0.31199523],\n",
"       [ 0.22513536,  0.28216941],\n",
"       [-0.58531056, -0.39065385],\n",
"       [ 0.03835212, -0.09487278],\n",
"       [-0.00504035, -0.25591318],\n",
"       [ 0.26381939,  0.23333758],\n",
"       [ 0.01980921,  0.33817236],\n",
"       [ 0.1402528 , -0.25778774],\n",
"       [ 0.11062524, -0.28766478],\n",
"       [ 0.26353509, -0.01497777],\n",
"       [-0.05545871, -0.20427886],\n",
"       [ 0.3667624 , -0.04635884],\n",
"       [-0.12849835, -0.10564007],\n",
"       [ 0.15969276,  0.10963322],\n",
"       [ 0.12381978,  0.1292463 ],\n",
"       [ 0.64249428, -0.1219245 ],\n",
"       [-0.15367282, -0.24413182],\n",
"       [ 0.18479383,  0.33869169],\n",
"       [-0.03418424, -0.25204694],\n",
"       [-0.24734436,  0.19517784],\n",
"       [ 0.22297625,  0.16294628],\n",
"       [-0.19965291,  0.0696484 ],\n",
"       [ 0.03500574,  0.06560658],\n",
"       [ 0.26142863,  0.06707866],\n",
"       [ 0.20367407,  0.02027372],\n",
"       [ 0.08673582,  0.18938647],\n",
"       [-0.43714675, -0.09590136],\n",
"       [-0.1411118 , -0.19166335],\n",
"       [-0.08254268,  0.44848239],\n",
"       [-0.25974933,  0.29048351],\n",
"       [-0.50486093, -0.10046551],\n",
"       [ 0.04882592,  0.1758667 ]])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"np_rng.normal(loc=0, scale=0.3, size=(32, 2))"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "544207bc-37fc-4376-9c63-bff44c72b32f",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# BOTTLENECK_SIZE = 128\n",
"TRAIN_BATCH_SIZE = 8192\n",
"VALID_BATCH_SIZE = 8192\n",
"RANDOM_SEED = 42\n",
"\n",
"DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "37d2d256-a348-402b-999d-1a4edce360c5",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"def train_valid_test_split(total_range, random_seed=RANDOM_SEED):\n",
"    train, testvalid = train_test_split(total_range, random_state=random_seed, test_size=0.2)\n",
"    test, valid = train_test_split(testvalid, random_state=random_seed, test_size=0.5)\n",
"    return train, valid, test\n",
"\n",
"def custom_dataloader(words_ids, batch_size, emb_dim, random_seed=RANDOM_SEED):\n",
"    np_rng = np.random.default_rng(seed=random_seed)\n",
"    while True:\n",
"        word_ids = np_rng.choice(words_ids, size=(batch_size, 2))\n",
"        additive_noise = np_rng.normal(loc=0, scale=0.1, size=(batch_size, emb_dim))\n",
"        alpha = np_rng.uniform(size=(batch_size, 1))\n",
"        yield torch.from_numpy(word_ids), torch.Tensor(additive_noise), torch.Tensor(alpha)\n",
"\n",
"class FakeEpoch:\n",
"    def __init__(self, dataloader, each_epoch_size):\n",
"        self.dataloader_iter = iter(dataloader)\n",
"        self.each_epoch_size = each_epoch_size\n",
"\n",
"    def __len__(self):\n",
"        return self.each_epoch_size\n",
"\n",
"    def __iter__(self):\n",
"        for _ in range(self.each_epoch_size):\n",
"            yield next(self.dataloader_iter)"
]
},
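{
"cell_type": "markdown",
"id": "5e8a1b2c-3d4f-4a6b-9c7d-0e1f2a3b4c5d",
"metadata": {},
"source": [
"A quick shape check of the generator above. This cell is illustrative and was not part of the original run; the `range(1000)` vocabulary and `emb_dim=8` are arbitrary stand-ins."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6f9b2c3d-4e5a-4b7c-8d9e-1f2a3b4c5d6e",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sanity check (not part of the original run): pull one batch\n",
"# from the infinite generator and confirm the shapes the model expects.\n",
"peek = custom_dataloader(words_ids=range(1000), batch_size=4, emb_dim=8)\n",
"word_ids, additive_noise, alpha = next(peek)\n",
"print(word_ids.shape, additive_noise.shape, alpha.shape)\n",
"# expected: torch.Size([4, 2]) torch.Size([4, 8]) torch.Size([4, 1])"
]
},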
{
"cell_type": "code",
"execution_count": 11,
"id": "644ae479-3f9a-426a-bd0b-4ec7694bc675",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"def ez_freeze(module):\n",
"    for param in module.parameters():\n",
"        param.requires_grad = False\n",
"\n",
"def ez_mlp(linear_dims, last_layer_bias=False):\n",
"    layers = []\n",
"    pairs_count = len(linear_dims) - 1\n",
"    for idx in range(pairs_count):\n",
"        in_dim, out_dim = linear_dims[idx], linear_dims[idx + 1]\n",
"        if idx == pairs_count - 1:\n",
"            layers.append(nn.Linear(in_dim, out_dim, bias=last_layer_bias))\n",
"        else:\n",
"            layers.append(nn.Linear(in_dim, out_dim, bias=True))\n",
"            layers.append(nn.ReLU())\n",
"    return nn.Sequential(*layers)\n",
"\n",
"def auto_encoder_model(linear_dims):\n",
"    return nn.Sequential(\n",
"        ez_mlp(linear_dims, last_layer_bias=False),\n",
"        nn.LayerNorm(linear_dims[-1]),\n",
"        ez_mlp(list(reversed(linear_dims)), last_layer_bias=True)\n",
"    )\n",
"\n",
"class AutoEncoderModel(nn.Module):\n",
"    def __init__(self, pretrained_name, bottleneck_sizes):\n",
"        super().__init__()\n",
"\n",
"        self.bottleneck_sizes = bottleneck_sizes\n",
"\n",
"        model = T5Model.from_pretrained(pretrained_name)\n",
"        self.emb_layer = model.get_encoder().get_input_embeddings()\n",
"        ez_freeze(self.emb_layer)\n",
"\n",
"        self.auto_encoder = auto_encoder_model([\n",
"            self.embedding_dim,\n",
"            *bottleneck_sizes\n",
"        ])\n",
"\n",
"        self.loss_fn = nn.MSELoss()\n",
"\n",
"    def forward(self, word_ids, additive_noise, alpha):\n",
"        # word_ids.shape = (batch_size, 2)\n",
"        # additive_noise.shape = (batch_size, embedding_dim)\n",
"        # alpha.shape = (batch_size, 1)\n",
"\n",
"        word_embs = self.emb_layer(word_ids)\n",
"        # word_embs.shape = (batch_size, 2, embedding_dim)\n",
"\n",
"        word_combs = word_embs[:, 0] * alpha + word_embs[:, 1] * (1 - alpha)\n",
"        # word_combs.shape = (batch_size, embedding_dim)\n",
"\n",
"        y_hat = self.auto_encoder(word_combs + additive_noise)\n",
"        loss = self.loss_fn(y_hat, word_combs)\n",
"        return loss, y_hat\n",
"\n",
"    @property\n",
"    def embedding_dim(self):\n",
"        return self.emb_layer.embedding_dim\n",
"\n",
"    @property\n",
"    def num_embeddings(self):\n",
"        return self.emb_layer.num_embeddings"
]
},
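{
"cell_type": "markdown",
"id": "7a0c3d4e-5f6b-4c8d-9e0f-2a3b4c5d6e7f",
"metadata": {},
"source": [
"`AutoEncoderModel` keeps the T5 embedding layer frozen and trains only the MLP on top of it: the encoder half compresses an embedding through the `bottleneck_sizes` chain, a `LayerNorm` normalizes the code, and a mirrored decoder maps back to the embedding dimension. The forward pass builds a mixup-style convex combination `alpha * e1 + (1 - alpha) * e2` of two token embeddings, perturbs it with additive noise, and scores the reconstruction against the clean combination, which is what makes this a denoising autoencoder."
]
},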
{
"cell_type": "code",
"execution_count": 12,
"id": "aba28049-20bf-4ae6-9445-2f7c294686d8",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"model = AutoEncoderModel('google/t5-large-lm-adapt', bottleneck_sizes=[768, 768, 512, 512, 256, 256, 128, 128])"
]
},
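{
"cell_type": "markdown",
"id": "8b1d4e5f-6a7c-4d9e-0f1a-3b4c5d6e7f80",
"metadata": {},
"source": [
"A minimal smoke test, added for illustration (not executed in the original notebook): a tiny noise-free batch run on CPU, assuming only the cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c2e5f6a-7b8d-4e0f-1a2b-4c5d6e7f8091",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative smoke test (not part of the original run): with zero noise and\n",
"# alpha fixed at 0.5, the loss is plain reconstruction error on midpoints.\n",
"with torch.no_grad():\n",
"    ids = torch.randint(0, model.num_embeddings, (4, 2))\n",
"    noise = torch.zeros(4, model.embedding_dim)\n",
"    alpha = torch.full((4, 1), 0.5)\n",
"    loss, y_hat = model(ids, noise, alpha)\n",
"print(loss.item(), y_hat.shape)  # scalar MSE and a (4, embedding_dim) reconstruction"
]
},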
{
"cell_type": "code",
"execution_count": 16,
"id": "cac6bc39-ba12-4052-bd5f-8834f57cfa15",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"tensor(96.9082)"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"(model.emb_layer.weight**2).mean()"
]
},
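{
"cell_type": "markdown",
"id": "ad3f6a7b-8c9e-4f1a-2b3c-5d6e7f809102",
"metadata": {},
"source": [
"A mean squared weight of about 96.9 puts the RMS of the embedding entries near 9.8, so the additive noise used by the dataloader (scale 0.1) is small relative to the embeddings themselves. The cell below (added for illustration) just makes that explicit."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "be4a7b8c-9d0f-4a2b-3c4d-6e7f80910213",
"metadata": {},
"outputs": [],
"source": [
"# RMS of the frozen embedding entries (illustrative; not in the original run).\n",
"(model.emb_layer.weight ** 2).mean().sqrt()"
]
},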
{
"cell_type": "code",
"execution_count": 6,
"id": "afe2efbf-e703-4c43-8f7b-a87d303ea89e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"train_ds, valid_ds, test_ds = train_valid_test_split(range(model.num_embeddings))\n",
"train_loader = custom_dataloader(words_ids=train_ds, batch_size=TRAIN_BATCH_SIZE, emb_dim=model.embedding_dim)\n",
"valid_loader = custom_dataloader(words_ids=valid_ds, batch_size=VALID_BATCH_SIZE, emb_dim=model.embedding_dim)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "c24ccc1c-4cbe-4373-871e-9090dceb69a1",
"metadata": {},
"outputs": [],
"source": [
"train_loader = FakeEpoch(train_loader, 1000)\n",
"valid_loader = FakeEpoch(valid_loader, 100)"
]
},
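{
"cell_type": "markdown",
"id": "cf5b8c9d-0e1a-4b3c-4d5e-7f8091021324",
"metadata": {},
"source": [
"`FakeEpoch` turns the infinite generators into fixed-length iterables, so the loops below see \"epochs\" of 1000 training batches and 100 validation batches without needing a finite `Dataset`."
]
},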
{
"cell_type": "code",
"execution_count": 8,
"id": "71936e43-d718-45ef-8115-7fc63999ebd9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"def _prefix_dict_keys(prefix, input_dict):\n",
"    return {f'{prefix}_{key}': val for key, val in input_dict.items()}\n",
"\n",
"def train_loop(model, loader, optimizer, use_tqdm=False):\n",
"    model.train()\n",
"\n",
"    batch_losses = []\n",
"\n",
"    if use_tqdm:\n",
"        loader = tqdm(loader, position=2, desc=\"Train Loop\", leave=False)\n",
"\n",
"    for row in loader:\n",
"        optimizer.zero_grad()\n",
"\n",
"        out = model(*(item.to(DEVICE) for item in row))\n",
"        loss = out[0]\n",
"\n",
"        batch_loss_value = loss.item()\n",
"        loss.backward()\n",
"        optimizer.step()\n",
"\n",
"        batch_losses.append(batch_loss_value)\n",
"\n",
"    loss_value = np.mean(batch_losses)\n",
"    return _prefix_dict_keys('train', {\n",
"        'loss': loss_value\n",
"    })\n",
"\n",
"def valid_loop(model, loader, use_tqdm=False):\n",
"    model.eval()\n",
"\n",
"    batch_losses = []\n",
"\n",
"    if use_tqdm:\n",
"        loader = tqdm(loader, position=2, desc=\"Valid Loop\", leave=False)\n",
"\n",
"    with torch.no_grad():\n",
"        for row in loader:\n",
"            out = model(*(item.to(DEVICE) for item in row))\n",
"            loss = out[0]\n",
"\n",
"            batch_losses.append(loss.item())\n",
"\n",
"    loss_value = np.mean(batch_losses)\n",
"\n",
"    return_value = {\n",
"        'loss': loss_value,\n",
"    }\n",
"\n",
"    return _prefix_dict_keys('valid', return_value)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "082b5384-827f-48b3-aa8e-40483668bbc0",
"metadata": {
"tags": []
},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[9], line 8\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m epoch \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m1000\u001b[39m):\n\u001b[1;32m 5\u001b[0m epoch_results \u001b[38;5;241m=\u001b[39m {}\n\u001b[1;32m 7\u001b[0m epoch_results\u001b[38;5;241m.\u001b[39mupdate(\n\u001b[0;32m----> 8\u001b[0m \u001b[43mtrain_loop\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 9\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 10\u001b[0m \u001b[43m \u001b[49m\u001b[43mloader\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrain_loader\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 11\u001b[0m \u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_tqdm\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\n\u001b[1;32m 13\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 14\u001b[0m )\n\u001b[1;32m 16\u001b[0m epoch_results\u001b[38;5;241m.\u001b[39mupdate(\n\u001b[1;32m 17\u001b[0m valid_loop(\n\u001b[1;32m 18\u001b[0m model\u001b[38;5;241m=\u001b[39mmodel,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 21\u001b[0m )\n\u001b[1;32m 22\u001b[0m )\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28mprint\u001b[39m(epoch_results)\n",
"Cell \u001b[0;32mIn[8], line 12\u001b[0m, in \u001b[0;36mtrain_loop\u001b[0;34m(model, loader, optimizer, use_tqdm)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m use_tqdm:\n\u001b[1;32m 10\u001b[0m loader \u001b[38;5;241m=\u001b[39m tqdm(loader, position\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m, desc\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTrain Loop\u001b[39m\u001b[38;5;124m\"\u001b[39m, leave\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[0;32m---> 12\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m row \u001b[38;5;129;01min\u001b[39;00m loader:\n\u001b[1;32m 13\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n\u001b[1;32m 15\u001b[0m out \u001b[38;5;241m=\u001b[39m model(\u001b[38;5;241m*\u001b[39m(item\u001b[38;5;241m.\u001b[39mto(DEVICE) \u001b[38;5;28;01mfor\u001b[39;00m item \u001b[38;5;129;01min\u001b[39;00m row))\n",
"Cell \u001b[0;32mIn[3], line 24\u001b[0m, in \u001b[0;36mFakeEpoch.__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__iter__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39meach_epoch_size):\n\u001b[0;32m---> 24\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdataloader_iter\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[3], line 10\u001b[0m, in \u001b[0;36mcustom_dataloader\u001b[0;34m(words_ids, batch_size, emb_dim, random_seed)\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 9\u001b[0m word_ids \u001b[38;5;241m=\u001b[39m np_rng\u001b[38;5;241m.\u001b[39mchoice(words_ids, size\u001b[38;5;241m=\u001b[39m(batch_size, \u001b[38;5;241m2\u001b[39m))\n\u001b[0;32m---> 10\u001b[0m additive_noise \u001b[38;5;241m=\u001b[39m \u001b[43mnp_rng\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnormal\u001b[49m\u001b[43m(\u001b[49m\u001b[43mloc\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscale\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43memb_dim\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 11\u001b[0m alpha \u001b[38;5;241m=\u001b[39m np_rng\u001b[38;5;241m.\u001b[39muniform(size\u001b[38;5;241m=\u001b[39m(batch_size, \u001b[38;5;241m1\u001b[39m))\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mfrom_numpy(word_ids), torch\u001b[38;5;241m.\u001b[39mTensor(additive_noise), torch\u001b[38;5;241m.\u001b[39mTensor(alpha)\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"model.to(DEVICE)\n",
"optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)\n",
"\n",
"for epoch in range(1000):\n",
"    epoch_results = {}\n",
"\n",
"    epoch_results.update(\n",
"        train_loop(\n",
"            model=model,\n",
"            loader=train_loader,\n",
"            optimizer=optimizer,\n",
"            use_tqdm=False\n",
"        )\n",
"    )\n",
"\n",
"    epoch_results.update(\n",
"        valid_loop(\n",
"            model=model,\n",
"            loader=valid_loader,\n",
"            use_tqdm=False\n",
"        )\n",
"    )\n",
"    print(epoch_results)"
]
},
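{
"cell_type": "markdown",
"id": "d06c9d0e-1f2b-4c4d-5e6f-809102132435",
"metadata": {},
"source": [
"The `KeyboardInterrupt` above shows the run was stopped by hand mid-epoch, so no per-epoch results were captured in the saved notebook."
]
},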
{
"cell_type": "code",
"execution_count": null,
"id": "53425637-6146-41d2-b59e-4617ae1f8521",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:deep]",
"language": "python",
"name": "conda-env-deep-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}