diff --git a/Pipfile b/Pipfile index e9eaf166..ea970ced 100644 --- a/Pipfile +++ b/Pipfile @@ -4,15 +4,12 @@ verify_ssl = true name = "pypi" [packages] -"jinja2" = "*" -tensorflow = ">=1.6" -numpy = "*" -"idx2numpy" = "*" "e1839a8" = {path = ".", editable = true} -attrs = "*" [dev-packages] pylint = "*" "flake8" = "*" pytest = "*" rope = "*" +pillow = "*" +scipy = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 83e868d5..64857a7a 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "7b64b2cd2c7f83a7b8fe9b174cfe10f60089ca6928c59870705bdb043b0c6e8d" + "sha256": "7d6e155897e7e165e7ab0db75866d7c2547a77f5c32b44c30ea9b7b3aa7fcc03" }, "pipfile-spec": 6, "requires": {}, @@ -16,9 +16,9 @@ "default": { "absl-py": { "hashes": [ - "sha256:e0eb8358b549552b1cc5972350bc3e41dd0a926c15b3ff95ce60f3c78c80824c" + "sha256:fcb729e9a3cf1a8f88f6c3a6465859c42116f23e1feb6208825eb88f3fd2b880" ], - "version": "==0.2.2" + "version": "==0.3.0" }, "astor": { "hashes": [ @@ -32,9 +32,15 @@ "sha256:4b90b09eeeb9b88c35bc642cbac057e45a5fd85367b985bd2809c62b7b939265", "sha256:e0d0eb91441a3b53dab4d9b743eafc1ac44476296a2053b6ca3af0b139faf87b" ], - "index": "pypi", "version": "==18.1.0" }, + "click": { + "hashes": [ + "sha256:29f99fc6125fbc931b758dc053b3114e55c77a6e4c6c3a2674a2dc986016381d", + "sha256:f15516df478d5a56180fbf80e68f206010e6d160fc39fa508b65e035fd75130b" + ], + "version": "==6.7" + }, "e1839a8": { "editable": true, "path": "." @@ -47,45 +53,45 @@ }, "grpcio": { "hashes": [ - "sha256:002f9170d8c0c10c33f643240c2332ce6eb8c8cc8c2b08d6a8f3172ef574751b", - "sha256:0f80b28033105e99e08d84361b899c45bd6eb31d2765ff2ed7cb66b8b1d12820", - "sha256:1012969abbec9a6c5d7b09ac829df296cb0a2ddebe70dd314abb881fa62cbcc9", - "sha256:19af04506fec213de9a889343d4b090e9d0b675e1d9b8397ea6fc6521f939a48", - "sha256:37cdffc26987ae2a077cc80fc0d87273e0e01ba15df40ec0a20d107e635700cf", - "sha256:4e72b30a276e519d687dc53d86ecf9d65edf31dad473f3bbd041542bbf9df12f", - "sha256:564499d84c2e90eb87819f7a299eaa2aee32db8208a8e8d00e8941a0c66413c0", - "sha256:6324581e215157f0fbe335dff2e21a65b4406db98ac7cca05f1e23b4f510b426", - "sha256:64758d2718f14792286b32d31560edb10c3726ce5d5875c3472c95908b658aeb", - "sha256:67b36c6b0070ef858e5438e82d3a3fb19db33a174572019744d7b965c95fa9b0", - "sha256:83766cdfc3492a693902eff193648b3cc9710e4a131815cd0cc60e30e9b7cf8f", - "sha256:935a0b328b79e03a47d87960836513bab1150d0faab44aff7968f8623ed48d62", - "sha256:982349cc24df7569ab955bafa3ba7575140db171c3bd757fa135d0c0c0d6990d", - "sha256:9c79eaca4b0b8fb973c6e0c4c6cd4be44e76dfd09d56cfc7b2a959289fda6682", - "sha256:9efcdfe1cc1670dde940f86e51fe080184f637106cf9b6d8dfc14cab9660d710", - "sha256:a24a37e441c36d6605029cfb035806ef4f888a37d757e64fb0488d03c3fff806", - "sha256:ab33b5965df89fd6e4ea7846cb9a28ece4e6d9ded23434d3321b992051a62de0", - "sha256:ad6c12e9ab809fd4f8891a085a7655ca2690a5753bb6258d9d602084f610a223", - "sha256:b1e4c83209bce5548029ed7df2af6a94415bb7dd37a2203183919d1d5a5249c9", - "sha256:b87cdf8c4291875bda4b2a0f6747cab008fd9ea6c682a43d0869d308a16d0956", - "sha256:b9e3793e0d1498e5c72993ba91f14e617e06e945d6562716bfbbbc6a9e7ab7de", - "sha256:ba388412f64d6ac02098fcb77c409896297b058a8942d946bd1999699d35c123", - "sha256:baaf1a0d5a5d9af67ec3ff0d9dfc1d642e7aba38e59ae60de1c6d1bd46406177", - "sha256:be0d79c3253f7d23facc4dab96ada086e9b17048a36843041a5fac3bad9415e7", - "sha256:ce3c23b1110238c1f440cdbceefd0c5fc7fcf3022c82c8a349514038aa69ac3d", - "sha256:d9b9d309e7db3a988df0d12ba3c1ca4a7059c502c10ce34d4d65779bebbb6949", - 
"sha256:ddd489b4730d7eccf9b836216d7137f85b3e68258a292a618a12e9ce5a7bcfb0", - "sha256:dee025675506fc84f475b9fb0c8ce2cc9b663f9d7c7a22281ba878538be78fe8", - "sha256:e6ac3198f4174c1b58e3a6b765d0b9cdcead231ba8bf4ddd30709320960b6e39", - "sha256:e738782d0216087cb7ee9acc54305db9a30bf9a784bff7a407b748a91dd8c942", - "sha256:f136b98861f27e2628f824c7c7e8d4bce47c9e18953fd00a0aca1d1c9cfd1b6c" - ], - "version": "==1.13.0" + "sha256:0087fb171150b93ebe98763b94dcab071d1af289569d6d163166f38b31ab2fc6", + "sha256:02349c8d4e33fa2d1f98b0493eeb517573cfdbbec6b1f20099e224bc3452ddab", + "sha256:03b546c32b67735cad029a45dc5b5281f7038a00c2b1a097779893f635c2bf81", + "sha256:19814e9e0cdc6ee4582fbb55ed178a35b4f5a8417096396628f8664b233441b8", + "sha256:19841f07caaf3d0fb12ae906a2c23e177d1c3304bc526c8251db2e5d6f23b31f", + "sha256:213da2a8df928eabb4723621b8e3b7ed52eb9c4e5d9bb14439723c1d9fa17a70", + "sha256:22a64e6a43f5e536326cec78e85c141e283d9ff44887a662c8672c9ba80e091d", + "sha256:334280c56a35453b8e7d636b3c8038bb979832ef6a620a475293a5f0d91ae208", + "sha256:33fc815a8752ddc844579f9ff724855561701dc3d52851aa0483e81e2f796af8", + "sha256:39ade6613e355dea85e69d8ce82447fee906f29b5603326e6ba8b44213f2611b", + "sha256:3a3baa77bd1183e963766f57a70475b970c8b20462f77e497b6062197c134437", + "sha256:435d6f91a583ae07776d92305c392ba4cfae39af8fce3fdbf8fa50b857f08f65", + "sha256:4bf23666e763ca7ff6010465864e9f088f4ac7ecc1e11abd6f85b250e66b2c05", + "sha256:4d95f832eb37f667186369182ccbd44dc163a8a81392576daaf3f039a139fdff", + "sha256:50712917e24057acea00be61eeac430e8d6790b5944826d9b63c928ce336d685", + "sha256:576c5edc856f6607818976055529be0f480c818f8fd09dff0e6c60e443d276c1", + "sha256:57c1b34424c51c385a982a4faf8f9d97f30102ff1b0111971b0b5c1bd4b182fe", + "sha256:6480da99906c8f7fb822179dba243de7dec2b0405302a9339cb6b6c597899ee1", + "sha256:679f92ec9185b58f9d944caa9c2b843537143732b9e61e195c4c6a594e1cb60a", + "sha256:6b5f34a1a1bde97f71058dd0ffe6c0d4432c944fa843a324879120f437d6bebd", + "sha256:6e633d69ec2b0c5c9ecf92106523695175af33953535188d0e6a1abf98af3aa4", + "sha256:78bcf7a7828b377f11f00d6146e3e56c529ebd016e2d8146ec4120cafa080839", + "sha256:8054ff2be82cf08d416e9596cad18977c28cbe25e442c2513aa6fb33ada7cb19", + "sha256:9122d053577fb36235733934680349a97af39d6f91675d16a2ff05d966095253", + "sha256:ab7144bf2c8c13eae4f16a08ae8a9bc8d9eef514c41312a2e3d3a5d5aab900c4", + "sha256:b18188e5bcebe4c3cacf8c174c5da05fc1da68aa99bf07e3b93a6570cbf8cf1c", + "sha256:bbbd2a993747e312fba0d7b6497a2b5c403d67995e0c59218ffe35dabe1121ff", + "sha256:c12ee6d554eac17b7b11b0baf97a31dde27e8ad5ebcfdc088fdff1118def62d7", + "sha256:d7b83e630d8b4dfe0a0de64d390e0160b352415b5aa3da09115b2602a79da437", + "sha256:d9e4b0dde1e74162ac57e3022a61ee80370c5c70414038166514c2a799995e1f", + "sha256:f363852c617c7e15760acf0d20b7c6db8bc4507f1507dfa35e21e6610005a598", + "sha256:f9bf9749f20757ccec8abce9f25666ae3bccfd747f9df2b83261942be982e3c7" + ], + "version": "==1.14.1" }, "idx2numpy": { "hashes": [ "sha256:c609d5b260e6e7b538d458ba266c55b5b478e43e581d24d7017351813e67fcae" ], - "index": "pypi", "version": "==1.2.2" }, "jinja2": { @@ -93,7 +99,6 @@ "sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd", "sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4" ], - "index": "pypi", "version": "==2.10" }, "markdown": { @@ -140,7 +145,6 @@ "sha256:e1864a4e9f93ddb2dc6b62ccc2ec1f8250ff4ac0d3d7a15c8985dd4e1fbd6418", "sha256:e1d18421a7e2ad4a655b76e65d549d4159f8874c18a417464c1d439ee7ccc7cd" ], - "index": "pypi", "version": "==1.14.5" }, "protobuf": { @@ -153,7 +157,6 @@ 
"sha256:7d786f3ef5b33a04e6538089674f244a3b0f588155016559d950989010af97d0", "sha256:8bf82bb7a466a54be7272dcb492f71d55a2453a58d862fb74c3f2083f2768543", "sha256:9bbc1ae1c33c1bd3a2fc05a3aec328544d2b039ff0ce6f000063628a32fad777", - "sha256:9e992c68103ab5635728d29fcf132c669cb4e2db24d012685210276185009d17", "sha256:9f1087abb67b34e55108bc610936b34363a7aac692023bcbb17e065c253a1f80", "sha256:9fefcb92a3784b446abf3641d9a14dad815bee88e0edd10b9a9e0e144d01a991", "sha256:a37836aa47d1b81c2db1a6b7a5e79926062b5d76bd962115a0e615551be2b48d", @@ -172,28 +175,28 @@ }, "tensorboard": { "hashes": [ - "sha256:42a04637a636e16054b065907c81396b83a9702948ecd14218f19dc5cf85de98", - "sha256:97661706fbe857c372405e0f5bd7c3db2197b5e70cec88f6924b726fde65c2c1" + "sha256:64edbe66864e02719f85708ae01efe3448af964c042a502fd2046cc87a3b1f12", + "sha256:e4ea6ac2e47bf715b915f08a186e6205fa097318bd73f0b265d437b1d7834484" ], - "version": "==1.9.0" + "markers": "python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.2.*' and python_version != '3.1.*' and python_version != '3.0.*'", + "version": "==1.10.0" }, "tensorflow": { "hashes": [ - "sha256:01fca4d85855131c874cff811e0786f240e7270d5dfa699883bde60f550752b6", - "sha256:22d71e13947b85ca8fb625db4d7094c68ebc014fee911db9d658205965268980", - "sha256:393de695264357de6505a7dc86c7787b6caf0c1ec54830fe8dad105d090fd6a2", - "sha256:3e8b783966a02d83027a92c3c55083ad8d8a0ddf4ad920f40ffaae107be79c78", - "sha256:42cc87627c3b0f2d60776412eaaea3aa0856650a77f94bec841535908f77ee45", - "sha256:51aa006ce0c7cbca3381e05bc7658f59cfec90a11480f2d35afd342cef8294d8", - "sha256:7486408e0ce0381edc2935c3cdbc4052d7a5e8c36083c058ffa013ac6047bb24", - "sha256:7d90a57373501b3fae0a109f5e5176883ec5417a43868970564b84ae9e64709d", - "sha256:9215db0fa590a2da6a08d6c7ca41d940727c40332c80c968f1459470d6bf8873", - "sha256:c6c8ec1b5a30e1fccebc4ddbee18b6df2bbaf3cf128ee086bc538738c1b3b9be", - "sha256:cd83f4a50c0eaf18526e4e56e519c3593d998d1d213df161bbc78fffdd097cb8", - "sha256:d351f7db08b8de322536c5886fada3e37feae809bfd37368050f9eeea544b87e" - ], - "index": "pypi", - "version": "==1.9.0" + "sha256:0b00e62c7f13a9935413ef71aa4c251699a0a2715955ee25f6f174bedd201b27", + "sha256:2aa07a64ea7ec238bc734c82f0bb11562c58bdcd61e976ff2076275bfbc9ab42", + "sha256:452b9547dd69c2e264263f651f11e5aca60dd911cddbc6cf34a01137fd39505a", + "sha256:4d95038155cf3c95d3d92aaf494442aaa8f787a87f82d889d8305579b554ab45", + "sha256:5bb2d1ff4321dda724885be8167ace3bf716708c8aff21bd622e047801915eb2", + "sha256:6f9aa4282a8890d79b83f2f55a2429675f2d0881964248a8d8838e761773e170", + "sha256:7344ea4207f25dccfd8fdd6da2283930190a34a90d301636b7281785c148682c", + "sha256:7c99f379fcc2fc8cf776471006947115996bbd985d6ee41aa26058f58bf4273e", + "sha256:814d717b68b83476a2bd3b5fba26949786d224770dfc7462d820e390409e5a6b", + "sha256:92b166baf82b1c5962d2661e9d5d01ebcd83d528d0295e48bf9896e5f2875d73", + "sha256:9eb5d117089b5cfa33c6dd32fd05574662a8167de53b686708b683e4af9bc687", + "sha256:a0ad5e1e9ccb230fbcbde78cd187b74526db3145899f5c639f453e246c0b80c0" + ], + "version": "==1.10.0" }, "termcolor": { "hashes": [ @@ -220,10 +223,10 @@ "develop": { "astroid": { "hashes": [ - "sha256:0ef2bf9f07c3150929b25e8e61b5198c27b0dca195e156f0e4d5bdd89185ca1a", - "sha256:fc9b582dba0366e63540982c3944a9230cbc6f303641c51483fa547dcc22393a" + "sha256:292fa429e69d60e4161e7612cb7cc8fa3609e2e309f80c224d93a76d5e7b58be", + "sha256:c7013d119ec95eb626f7a2011f0b63d0c9a095df9ad06d8507b37084eada1a8d" ], - "version": "==1.6.5" + "version": "==2.0.4" }, "atomicwrites": { "hashes": [ @@ -237,7 +240,6 @@ 
"sha256:4b90b09eeeb9b88c35bc642cbac057e45a5fd85367b985bd2809c62b7b939265", "sha256:e0d0eb91441a3b53dab4d9b743eafc1ac44476296a2053b6ca3af0b139faf87b" ], - "index": "pypi", "version": "==18.1.0" }, "flake8": { @@ -299,19 +301,88 @@ }, "more-itertools": { "hashes": [ - "sha256:2b6b9893337bfd9166bee6a62c2b0c9fe7735dcf85948b387ec8cba30e85d8e8", - "sha256:6703844a52d3588f951883005efcf555e49566a48afd4db4e965d69b883980d3", - "sha256:a18d870ef2ffca2b8463c0070ad17b5978056f403fb64e3f15fe62a52db21cc0" + "sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092", + "sha256:c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e", + "sha256:fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d" + ], + "version": "==4.3.0" + }, + "numpy": { + "hashes": [ + "sha256:07379fe0b450f6fd6e5934a9bc015025bb4ce1c8fbed3ca8bef29328b1bc9570", + "sha256:085afac75bbc97a096744fcfc97a4b321c5a87220286811e85089ae04885acdd", + "sha256:2d6481c6bdab1c75affc0fc71eb1bd4b3ecef620d06f2f60c3f00521d54be04f", + "sha256:2df854df882d322d5c23087a4959e145b953dfff2abe1774fec4f639ac2f3160", + "sha256:381ad13c30cd1d0b2f3da8a0c1a4aa697487e8bb0e9e0cbeb7439776bcb645f8", + "sha256:385f1ce46e08676505b692bfde918c1e0b350963a15ef52d77691c2cf0f5dbf6", + "sha256:4130e5ae16c656b7de654dc5e595cfeb85d3a4b0bb0734d19c0dce6dc7ee0e07", + "sha256:4d278c2261be6423c5e63d8f0ceb1b0c6db3ff83f2906f4b860db6ae99ca1bb5", + "sha256:51c5dcb51cf88b34b7d04c15f600b07c6ccbb73a089a38af2ab83c02862318da", + "sha256:589336ba5199c8061239cf446ee2f2f1fcc0c68e8531ee1382b6fc0c66b2d388", + "sha256:5ae3564cb630e155a650f4f9c054589848e97836bebae5637240a0d8099f817b", + "sha256:5edf1acc827ed139086af95ce4449b7b664f57a8c29eb755411a634be280d9f2", + "sha256:6b82b81c6b3b70ed40bc6d0b71222ebfcd6b6c04a6e7945a936e514b9113d5a3", + "sha256:6c57f973218b776195d0356e556ec932698f3a563e2f640cfca7020086383f50", + "sha256:758d1091a501fd2d75034e55e7e98bfd1370dc089160845c242db1c760d944d9", + "sha256:8622db292b766719810e0cb0f62ef6141e15fe32b04e4eb2959888319e59336b", + "sha256:8b8dcfcd630f1981f0f1e3846fae883376762a0c1b472baa35b145b911683b7b", + "sha256:91fdd510743ae4df862dbd51a4354519dd9fb8941347526cd9c2194b792b3da9", + "sha256:97fa8f1dceffab782069b291e38c4c2227f255cdac5f1e3346666931df87373e", + "sha256:9b705f18b26fb551366ab6347ba9941b62272bf71c6bbcadcd8af94d10535241", + "sha256:9d69967673ab7b028c2df09cae05ba56bf4e39e3cb04ebe452b6035c3b49848e", + "sha256:9e1f53afae865cc32459ad211493cf9e2a3651a7295b7a38654ef3d123808996", + "sha256:a4a433b3a264dbc9aa9c7c241e87c0358a503ea6394f8737df1683c7c9a102ac", + "sha256:baadc5f770917ada556afb7651a68176559f4dca5f4b2d0947cd15b9fb84fb51", + "sha256:c725d11990a9243e6ceffe0ab25a07c46c1cc2c5dc55e305717b5afe856c9608", + "sha256:d696a8c87315a83983fc59dd27efe034292b9e8ad667aeae51a68b4be14690d9", + "sha256:e1864a4e9f93ddb2dc6b62ccc2ec1f8250ff4ac0d3d7a15c8985dd4e1fbd6418", + "sha256:e1d18421a7e2ad4a655b76e65d549d4159f8874c18a417464c1d439ee7ccc7cd" ], - "version": "==4.2.0" + "version": "==1.14.5" + }, + "pillow": { + "hashes": [ + "sha256:00def5b638994f888d1058e4d17c86dec8e1113c3741a0a8a659039aec59a83a", + "sha256:026449b64e559226cdb8e6d8c931b5965d8fc90ec18ebbb0baa04c5b36503c72", + "sha256:03dbb224ee196ef30ed2156d41b579143e1efeb422974719a5392fc035e4f574", + "sha256:03eb0e04f929c102ae24bc436bf1c0c60a4e63b07ebd388e84d8b219df3e6acd", + "sha256:1be66b9a89e367e7d20d6cae419794997921fe105090fafd86ef39e20a3baab2", + "sha256:1e977a3ed998a599bda5021fb2c2889060617627d3ae228297a529a082a3cd5c", + 
"sha256:22cf3406d135cfcc13ec6228ade774c8461e125c940e80455f500638429be273", + "sha256:24adccf1e834f82718c7fc8e3ec1093738da95144b8b1e44c99d5fc7d3e9c554", + "sha256:2a3e362c97a5e6a259ee9cd66553292a1f8928a5bdfa3622fdb1501570834612", + "sha256:3832e26ecbc9d8a500821e3a1d3765bda99d04ae29ffbb2efba49f5f788dc934", + "sha256:4fd1f0c2dc02aaec729d91c92cd85a2df0289d88e9f68d1e8faba750bb9c4786", + "sha256:4fda62030f2c515b6e2e673c57caa55cb04026a81968f3128aae10fc28e5cc27", + "sha256:5044d75a68b49ce36a813c82d8201384207112d5d81643937fc758c05302f05b", + "sha256:522184556921512ec484cb93bd84e0bab915d0ac5a372d49571c241a7f73db62", + "sha256:5914cff11f3e920626da48e564be6818831713a3087586302444b9c70e8552d9", + "sha256:6661a7908d68c4a133e03dac8178287aa20a99f841ea90beeb98a233ae3fd710", + "sha256:79258a8df3e309a54c7ef2ef4a59bb8e28f7e4a8992a3ad17c24b1889ced44f3", + "sha256:7d74c20b8f1c3e99d3f781d3b8ff5abfefdd7363d61e23bdeba9992ff32cc4b4", + "sha256:81918afeafc16ba5d9d0d4e9445905f21aac969a4ebb6f2bff4b9886da100f4b", + "sha256:8194d913ca1f459377c8a4ed8f9b7ad750068b8e0e3f3f9c6963fcc87a84515f", + "sha256:84d5d31200b11b3c76fab853b89ac898bf2d05c8b3da07c1fcc23feb06359d6e", + "sha256:989981db57abffb52026b114c9a1f114c7142860a6d30a352d28f8cbf186500b", + "sha256:a3d7511d3fad1618a82299aab71a5fceee5c015653a77ffea75ced9ef917e71a", + "sha256:b3ef168d4d6fd4fa6685aef7c91400f59f7ab1c0da734541f7031699741fb23f", + "sha256:c1c5792b6e74bbf2af0f8e892272c2a6c48efa895903211f11b8342e03129fea", + "sha256:c5dcb5a56aebb8a8f2585042b2f5c496d7624f0bcfe248f0cc33ceb2fd8d39e7", + "sha256:e2bed4a04e2ca1050bb5f00865cf2f83c0b92fd62454d9244f690fcd842e27a4", + "sha256:e87a527c06319428007e8c30511e1f0ce035cb7f14bb4793b003ed532c3b9333", + "sha256:f63e420180cbe22ff6e32558b612e75f50616fc111c5e095a4631946c782e109", + "sha256:f8b3d413c5a8f84b12cd4c5df1d8e211777c9852c6be3ee9c094b626644d3eab" + ], + "index": "pypi", + "version": "==5.2.0" }, "pluggy": { "hashes": [ - "sha256:7f8ae7f5bdf75671a718d2daf0a64b7885f74510bcd98b1a0bb420eb9a9d0cff", - "sha256:d345c8fe681115900d6da8d048ba67c25df42973bda370783cd58826442dcd7c", - "sha256:e160a7fcf25762bb60efc7e171d4497ff1d8d2d75a3d0df7a21b76821ecbf5c5" + "sha256:6e3836e39f4d36ae72840833db137f7b7d35105079aee6ec4a62d9f80d594dd1", + "sha256:95eb8364a4708392bae89035f45341871286a333f749c3141c20573d2b3876e1" ], - "version": "==0.6.0" + "markers": "python_version != '3.3.*' and python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.0.*' and python_version != '3.1.*'", + "version": "==0.7.1" }, "py": { "hashes": [ @@ -336,26 +407,60 @@ }, "pylint": { "hashes": [ - "sha256:a48070545c12430cfc4e865bf62f5ad367784765681b3db442d8230f0960aa3c", - "sha256:fff220bcb996b4f7e2b0f6812fd81507b72ca4d8c4d05daf2655c333800cb9b3" + "sha256:1d6d3622c94b4887115fe5204982eee66fdd8a951cf98635ee5caee6ec98c3ec", + "sha256:31142f764d2a7cd41df5196f9933b12b7ee55e73ef12204b648ad7e556c119fb" ], "index": "pypi", - "version": "==1.9.2" + "version": "==2.1.1" }, "pytest": { "hashes": [ - "sha256:0453c8676c2bee6feb0434748b068d5510273a916295fd61d306c4f22fbfd752", - "sha256:4b208614ae6d98195430ad6bde03641c78553acee7c83cec2e85d613c0cd383d" + "sha256:86a8dbf407e437351cef4dba46736e9c5a6e3c3ac71b2e942209748e76ff2086", + "sha256:e74466e97ac14582a8188ff4c53e6cc3810315f342f6096899332ae864c1d432" ], "index": "pypi", - "version": "==3.6.3" + "version": "==3.7.1" }, "rope": { "hashes": [ - "sha256:a09edfd2034fd50099a67822f9bd851fbd0f4e98d3b87519f6267b60e50d80d1" + "sha256:a108c445e1cd897fe19272ab7877d172e7faf3d4148c80e7d20faba42ea8f7b2" + ], + "index": "pypi", 
+ "version": "==0.11.0" + }, + "scipy": { + "hashes": [ + "sha256:0611ee97296265af4a21164a5323f8c1b4e8e15c582d3dfa7610825900136bb7", + "sha256:08237eda23fd8e4e54838258b124f1cd141379a5f281b0a234ca99b38918c07a", + "sha256:0e645dbfc03f279e1946cf07c9c754c2a1859cb4a41c5f70b25f6b3a586b6dbd", + "sha256:0e9bb7efe5f051ea7212555b290e784b82f21ffd0f655405ac4f87e288b730b3", + "sha256:108c16640849e5827e7d51023efb3bd79244098c3f21e4897a1007720cb7ce37", + "sha256:340ef70f5b0f4e2b4b43c8c8061165911bc6b2ad16f8de85d9774545e2c47463", + "sha256:3ad73dfc6f82e494195144bd3a129c7241e761179b7cb5c07b9a0ede99c686f3", + "sha256:3b243c77a822cd034dad53058d7c2abf80062aa6f4a32e9799c95d6391558631", + "sha256:404a00314e85eca9d46b80929571b938e97a143b4f2ddc2b2b3c91a4c4ead9c5", + "sha256:423b3ff76957d29d1cce1bc0d62ebaf9a3fdfaf62344e3fdec14619bb7b5ad3a", + "sha256:42d9149a2fff7affdd352d157fa5717033767857c11bd55aa4a519a44343dfef", + "sha256:625f25a6b7d795e8830cb70439453c9f163e6870e710ec99eba5722775b318f3", + "sha256:698c6409da58686f2df3d6f815491fd5b4c2de6817a45379517c92366eea208f", + "sha256:729f8f8363d32cebcb946de278324ab43d28096f36593be6281ca1ee86ce6559", + "sha256:8190770146a4c8ed5d330d5b5ad1c76251c63349d25c96b3094875b930c44692", + "sha256:878352408424dffaa695ffedf2f9f92844e116686923ed9aa8626fc30d32cfd1", + "sha256:8b984f0821577d889f3c7ca8445564175fb4ac7c7f9659b7c60bef95b2b70e76", + "sha256:8f841bbc21d3dad2111a94c490fb0a591b8612ffea86b8e5571746ae76a3deac", + "sha256:c22b27371b3866c92796e5d7907e914f0e58a36d3222c5d436ddd3f0e354227a", + "sha256:d0cdd5658b49a722783b8b4f61a6f1f9c75042d0e29a30ccb6cacc9b25f6d9e2", + "sha256:d40dc7f494b06dcee0d303e51a00451b2da6119acbeaccf8369f2d29e28917ac", + "sha256:d8491d4784aceb1f100ddb8e31239c54e4afab8d607928a9f7ef2469ec35ae01", + "sha256:dfc5080c38dde3f43d8fbb9c0539a7839683475226cf83e4b24363b227dfe552", + "sha256:e24e22c8d98d3c704bb3410bce9b69e122a8de487ad3dbfe9985d154e5c03a40", + "sha256:e7a01e53163818d56eabddcafdc2090e9daba178aad05516b20c6591c4811020", + "sha256:ee677635393414930541a096fc8e61634304bb0153e4e02b75685b11eba14cae", + "sha256:f0521af1b722265d824d6ad055acfe9bd3341765735c44b5a4d0069e189a0f40", + "sha256:f25c281f12c0da726c6ed00535ca5d1622ec755c30a3f8eafef26cf43fede694" ], "index": "pypi", - "version": "==0.10.7" + "version": "==1.1.0" }, "six": { "hashes": [ @@ -364,6 +469,35 @@ ], "version": "==1.11.0" }, + "typed-ast": { + "hashes": [ + "sha256:0948004fa228ae071054f5208840a1e88747a357ec1101c17217bfe99b299d58", + "sha256:10703d3cec8dcd9eef5a630a04056bbc898abc19bac5691612acba7d1325b66d", + "sha256:1f6c4bd0bdc0f14246fd41262df7dfc018d65bb05f6e16390b7ea26ca454a291", + "sha256:25d8feefe27eb0303b73545416b13d108c6067b846b543738a25ff304824ed9a", + "sha256:29464a177d56e4e055b5f7b629935af7f49c196be47528cc94e0a7bf83fbc2b9", + "sha256:2e214b72168ea0275efd6c884b114ab42e316de3ffa125b267e732ed2abda892", + "sha256:3e0d5e48e3a23e9a4d1a9f698e32a542a4a288c871d33ed8df1b092a40f3a0f9", + "sha256:519425deca5c2b2bdac49f77b2c5625781abbaf9a809d727d3a5596b30bb4ded", + "sha256:57fe287f0cdd9ceaf69e7b71a2e94a24b5d268b35df251a88fef5cc241bf73aa", + "sha256:668d0cec391d9aed1c6a388b0d5b97cd22e6073eaa5fbaa6d2946603b4871efe", + "sha256:68ba70684990f59497680ff90d18e756a47bf4863c604098f10de9716b2c0bdd", + "sha256:6de012d2b166fe7a4cdf505eee3aaa12192f7ba365beeefaca4ec10e31241a85", + "sha256:79b91ebe5a28d349b6d0d323023350133e927b4de5b651a8aa2db69c761420c6", + "sha256:8550177fa5d4c1f09b5e5f524411c44633c80ec69b24e0e98906dd761941ca46", + "sha256:898f818399cafcdb93cbbe15fc83a33d05f18e29fb498ddc09b0214cdfc7cd51", + 
"sha256:94b091dc0f19291adcb279a108f5d38de2430411068b219f41b343c03b28fb1f", + "sha256:a26863198902cda15ab4503991e8cf1ca874219e0118cbf07c126bce7c4db129", + "sha256:a8034021801bc0440f2e027c354b4eafd95891b573e12ff0418dec385c76785c", + "sha256:bc978ac17468fe868ee589c795d06777f75496b1ed576d308002c8a5756fb9ea", + "sha256:c05b41bc1deade9f90ddc5d988fe506208019ebba9f2578c622516fd201f5863", + "sha256:c9b060bd1e5a26ab6e8267fd46fc9e02b54eb15fffb16d112d4c7b1c12987559", + "sha256:edb04bdd45bfd76c8292c4d9654568efaedf76fe78eb246dde69bdb13b2dad87", + "sha256:f19f2a4f547505fe9072e15f6f4ae714af51b5a681a97f187971f50c283193b6" + ], + "markers": "python_version < '3.7' and implementation_name == 'cpython'", + "version": "==1.1.0" + }, "wrapt": { "hashes": [ "sha256:d4d560d479f2c21e1b5443bbd15fe7ec4b37fe7e53d335d3b9b0a7b1226fe3c6" diff --git a/README.md b/README.md index f1ef0704..dd6d915e 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,10 @@ Following steps are a general guild for user how to porting a `TensorFlow` proto 1. install `utensor_cgent` - run `python3 setupt.py install` -2. run `utensor-cli graph.pb --output-nodes=NODE,NODE,...` +2. run `utensor-cli convert --output-nodes='NODE,NODE,...' graph.pb` - run `utensor-cli -h` for help - the `graph.pb` is the pb file of *original* graph (not quantized) +3. If you want to see what ops/nodes are in the pb file, you can run `utensor-cli show ` # How to test (for Developer) diff --git a/requirements.txt b/requirements.txt index 1cbc54cd..c5391a59 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,18 @@ absl-py==0.2.2 -astor==0.6.2 +astor==0.7.1 astroid==1.6.5 atomicwrites==1.1.5 attrs==18.1.0 -bleach==1.5.0 +backports.functools-lru-cache==1.5 +backports.weakref==1.0.post1 +click==6.7 +configparser==3.5.0 +enum34==1.1.6 flake8==3.5.0 +funcsigs==1.0.2 +futures==3.2.0 gast==0.2.0 -grpcio==1.12.1 -html5lib==0.9999999 +grpcio==1.13.0 idx2numpy==1.2.2 isort==4.3.4 Jinja2==2.10 @@ -15,20 +20,23 @@ lazy-object-proxy==1.3.1 Markdown==2.6.11 MarkupSafe==1.0 mccabe==0.6.1 +mock==2.0.0 more-itertools==4.2.0 -numpy==1.14.3 +numpy==1.14.5 +pbr==4.1.1 pluggy==0.6.0 -protobuf==3.5.2.post1 -py==1.5.3 +protobuf==3.6.0 +py==1.5.4 pycodestyle==2.3.1 pyflakes==1.6.0 pylint==1.9.2 -pytest==3.6.1 +pytest==3.6.3 rope==0.10.7 +singledispatch==3.4.0.3 six==1.11.0 -tensorboard==1.8.0 -tensorflow==1.8.0 +tensorboard==1.9.0 +tensorflow==1.9.0 termcolor==1.1.0 --e git+https://github.com/uTensor/utensor_cgen.git@f7ff03eef8653818aa47652f673509daa9b7a8f1#egg=utensor_cgen +-e git+https://github.com/uTensor/utensor_cgen.git@ec8a9444a52a280473ae56d86a73a66d4b188699#egg=utensor_cgen Werkzeug==0.14.1 wrapt==1.10.11 diff --git a/setup.py b/setup.py index 937f8b40..390978a6 100644 --- a/setup.py +++ b/setup.py @@ -25,14 +25,14 @@ package_data={"utensor_cgen": ["templates/*"]}, entry_points={ "console_scripts": [ - "utensor-cli=utensor_cgen.__main__:cli" + "utensor-cli=utensor_cgen.cli:cli" ]}, install_requires=[ 'Jinja2', 'tensorflow', - 'numpy', 'idx2numpy', - 'attrs' + 'attrs', + 'click' ], extras_require={ 'dev': ['pytest'] diff --git a/tests/deep_cnn/.gitignore b/tests/deep_cnn/.gitignore new file mode 100644 index 00000000..1269488f --- /dev/null +++ b/tests/deep_cnn/.gitignore @@ -0,0 +1 @@ +data diff --git a/tests/deep_cnn/cifar/__init__.py b/tests/deep_cnn/cifar/__init__.py new file mode 100644 index 00000000..68c5c52d --- /dev/null +++ b/tests/deep_cnn/cifar/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf8 -*- +from __future__ import absolute_import + +from ._cifar import * 
diff --git a/tests/deep_cnn/cifar/_cifar.py b/tests/deep_cnn/cifar/_cifar.py new file mode 100644 index 00000000..ea7157e2 --- /dev/null +++ b/tests/deep_cnn/cifar/_cifar.py @@ -0,0 +1,100 @@ +# -*- coding: utf8 -*- +from __future__ import print_function +from __future__ import absolute_import +import os +import tarfile + +import numpy as np +from tensorflow.python.platform import gfile +from tensorflow.python.framework import dtypes +from tensorflow.contrib.learn.python.learn.datasets import base +from .dataset import DataSet, dense_to_one_hot +from .cs231n.data_utils import load_CIFAR10 + +__all__ = ["read_data_sets", "get_class_names", "onehot_to_names"] + +_SOURCE_URL = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" +_LABELS_MAP = {0: 'plane', 1: 'car', 2: 'bird', + 3: 'cat', 4: 'deer', 5: 'dog', + 6: 'frog', 7: 'horse', 8: 'ship', + 9: 'truck'} + + +def read_data_sets(work_dir, + fake_data=False, + one_hot=False, + dtype=dtypes.float32, + reshape=True, + validation_size=None, + seed=None): + if fake_data: + def fake(): + return DataSet([], [], + fake_data=True, + image_dims=32*32*3, + num_class=10, + one_hot=one_hot, + dtype=dtype, + seed=seed) + + train = fake() + validation = fake() + test = fake() + return base.Datasets(train=train, validation=validation, test=test) + + root_data_dir = os.path.join(work_dir, "cifar-10-batches-py") + if not os.path.exists(root_data_dir): + # no data directory found + # download gz file + print("Trying to download cifar data (if the tar.gz file is not available)") + gz_fpath = base.maybe_download("cifar-10-python.tar.gz", + work_dir, + _SOURCE_URL) + print("Extracting data in {}".format(root_data_dir)) + with tarfile.open(gz_fpath) as tar: + tar.extractall(work_dir) + else: + print("cifar data directory found {}".format(root_data_dir)) + print("loading data...") + X_train, Y_train, X_test, Y_test = load_CIFAR10(root_data_dir) + if one_hot: + num_class_train = len(np.unique(Y_train)) + num_class_test = len(np.unique(Y_test)) + assert num_class_test == num_class_train, \ + "number of classes mismatch: {} and {}".format(num_class_train, num_class_test) + Y_train = dense_to_one_hot(Y_train, num_class_train) + Y_test = dense_to_one_hot(Y_test, num_class_test) + if validation_size is None: + validation_size = int(X_train.shape[0]/10) + valid_idx = np.random.choice(range(X_train.shape[0]), validation_size) + mask = np.array([True if row_idx in valid_idx else False for row_idx in range(X_train.shape[0])]) + X_train, X_valid = X_train[~mask], X_train[mask] + Y_train, Y_valid = Y_train[~mask], Y_train[mask] + + train_dataset = DataSet(X_train, Y_train, + one_hot=one_hot, + dtype=dtype, + reshape=reshape, + seed=seed) + valid_dataset = DataSet(X_valid, Y_valid, + one_hot=one_hot, + dtype=dtype, + reshape=reshape, + seed=seed) + test_dataset = DataSet(X_test, Y_test, + one_hot=one_hot, + dtype=dtype, + reshape=reshape, + seed=seed) + return base.Datasets(train=train_dataset, + validation=valid_dataset, + test=test_dataset) + + +def get_class_names(labels): + return np.vectorize(_LABELS_MAP.get)(labels) + + +def onehot_to_names(one_hot): + labels = np.argmax(one_hot, axis=1) + return get_class_names(labels) diff --git a/tests/deep_cnn/cifar/cs231n/__init__.py b/tests/deep_cnn/cifar/cs231n/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/deep_cnn/cifar/cs231n/data_utils.py b/tests/deep_cnn/cifar/cs231n/data_utils.py new file mode 100644 index 00000000..70cde17a --- /dev/null +++ b/tests/deep_cnn/cifar/cs231n/data_utils.py @@ 
-0,0 +1,40 @@ +from __future__ import print_function + +from six.moves import cPickle as pickle +import numpy as np +import os +from scipy.misc import imread +import platform + +def load_pickle(f): + version = platform.python_version_tuple() + if version[0] == '2': + return pickle.load(f) + elif version[0] == '3': + return pickle.load(f, encoding='latin1') + raise ValueError("invalid python version: {}".format(version)) + +def load_CIFAR_batch(filename): + """ load single batch of cifar """ + with open(filename, 'rb') as f: + datadict = load_pickle(f) + X = datadict['data'] + Y = datadict['labels'] + X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float") + Y = np.array(Y) + return X, Y + +def load_CIFAR10(ROOT): + """ load all of cifar """ + xs = [] + ys = [] + for b in range(1,6): + f = os.path.join(ROOT, 'data_batch_%d' % (b, )) + X, Y = load_CIFAR_batch(f) + xs.append(X) + ys.append(Y) + Xtr = np.concatenate(xs) + Ytr = np.concatenate(ys) + del X, Y + Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch')) + return Xtr, Ytr, Xte, Yte diff --git a/tests/deep_cnn/cifar/dataset.py b/tests/deep_cnn/cifar/dataset.py new file mode 100644 index 00000000..b8c9becb --- /dev/null +++ b/tests/deep_cnn/cifar/dataset.py @@ -0,0 +1,126 @@ +# -*- coding: utf8 -*- +# this file is (mostly) adapt from Tensorflow source code +from __future__ import print_function +from functools import reduce +import numpy +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import random_seed + +def dense_to_one_hot(labels_dense, num_classes): + """Convert class labels from scalars to one-hot vectors.""" + num_labels = labels_dense.shape[0] + index_offset = numpy.arange(num_labels) * num_classes + labels_one_hot = numpy.zeros((num_labels, num_classes)) + labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 + return labels_one_hot + +class DataSet(object): + + def __init__(self, + images, + labels, + fake_data=False, + image_dims = None, + num_class = None, + one_hot=False, + dtype=dtypes.float32, + reshape=True, + seed=None): + """Construct a DataSet. + one_hot arg is used only if fake_data is true. `dtype` can be either + `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into + `[0, 1]`. Seed arg provides for convenient deterministic testing. + """ + seed1, seed2 = random_seed.get_seed(seed) + # If op level seed is not set, use whatever graph level seed is returned + numpy.random.seed(seed1 if seed is None else seed2) + dtype = dtypes.as_dtype(dtype).base_dtype + if dtype not in (dtypes.uint8, dtypes.float32): + raise TypeError('Invalid image dtype %r, expected uint8 or float32' % + dtype) + if fake_data: + self._num_examples = 10000 + self.one_hot = one_hot + assert image_dims is not None, \ + "must give image_dims if fake_data is True: get {}".format(image_dims) + self._image_dims = image_dims + assert num_class is not None, \ + "must give num_class if fake_data is True: get {}".format(num_class) + self._num_class = num_class + else: + assert images.shape[0] == labels.shape[0], ( + 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape)) + self._num_examples = images.shape[0] + + # Convert shape from [num examples, rows, columns, depth] + # to [num examples, rows*columns*depth] + if reshape: + images = images.reshape(images.shape[0], -1) + if dtype == dtypes.float32: + # Convert from [0, 255] -> [0.0, 1.0]. 
+ images = images.astype(numpy.float32) + images = numpy.multiply(images, 1.0 / 255.0) + self._images = images + self._labels = labels + self._epochs_completed = 0 + self._index_in_epoch = 0 + + @property + def images(self): + return self._images + + @property + def labels(self): + return self._labels + + @property + def num_examples(self): + return self._num_examples + + @property + def epochs_completed(self): + return self._epochs_completed + + def next_batch(self, batch_size, fake_data=False, shuffle=True): + """Return the next `batch_size` examples from this data set.""" + if fake_data: + fake_image = [1] * self._image_dims + if self.one_hot: + fake_label = [1] + [0] * (self._num_class-1) + else: + fake_label = 0 + return [fake_image for _ in xrange(batch_size)], [ + fake_label for _ in xrange(batch_size) + ] + start = self._index_in_epoch + # Shuffle for the first epoch + if self._epochs_completed == 0 and start == 0 and shuffle: + perm0 = numpy.arange(self._num_examples) + numpy.random.shuffle(perm0) + self._images = self.images[perm0] + self._labels = self.labels[perm0] + # Go to the next epoch + if start + batch_size > self._num_examples: + # Finished epoch + self._epochs_completed += 1 + # Get the rest examples in this epoch + rest_num_examples = self._num_examples - start + images_rest_part = self._images[start:self._num_examples] + labels_rest_part = self._labels[start:self._num_examples] + # Shuffle the data + if shuffle: + perm = numpy.arange(self._num_examples) + numpy.random.shuffle(perm) + self._images = self.images[perm] + self._labels = self.labels[perm] + # Start next epoch + start = 0 + self._index_in_epoch = batch_size - rest_num_examples + end = self._index_in_epoch + images_new_part = self._images[start:end] + labels_new_part = self._labels[start:end] + return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0) + else: + self._index_in_epoch += batch_size + end = self._index_in_epoch + return self._images[start:end], self._labels[start:end] diff --git a/tests/deep_cnn/cifar10_cnn.pb b/tests/deep_cnn/cifar10_cnn.pb new file mode 100644 index 00000000..144de203 Binary files /dev/null and b/tests/deep_cnn/cifar10_cnn.pb differ diff --git a/tests/deep_cnn/cnn_weights.pkl b/tests/deep_cnn/cnn_weights.pkl new file mode 100644 index 00000000..4b91520d Binary files /dev/null and b/tests/deep_cnn/cnn_weights.pkl differ diff --git a/tests/deep_cnn/deep_cnn.ipynb b/tests/deep_cnn/deep_cnn.ipynb index a8b249c8..045b4698 100644 --- a/tests/deep_cnn/deep_cnn.ipynb +++ b/tests/deep_cnn/deep_cnn.ipynb @@ -5,12 +5,27 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.832660Z", - "start_time": "2018-05-11T08:29:14.595824Z" + "end_time": "2018-08-11T14:18:18.722370Z", + "start_time": "2018-08-11T14:18:18.713958Z" } }, "outputs": [], "source": [ + "import pickle" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2018-08-11T14:18:23.481655Z", + "start_time": "2018-08-11T14:18:19.346213Z" + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", "import tensorflow as tf\n", "from tensorflow.tools.graph_transforms import TransformGraph\n", "\n", @@ -22,8 +37,8 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.838427Z", - "start_time": "2018-05-11T08:29:18.835115Z" + "end_time": "2018-08-11T14:18:23.495387Z", + "start_time": "2018-08-11T14:18:23.484875Z" } }, 
"outputs": [], @@ -31,13 +46,49 @@ "print(tf.__version__)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Define Graph" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2018-08-11T14:18:23.543090Z", + "start_time": "2018-08-11T14:18:23.499102Z" + } + }, + "outputs": [], + "source": [ + "with open('cnn_weights.pkl', 'rb') as fid:\n", + " pretrain_weights = pickle.load(fid)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2018-08-11T14:18:23.774598Z", + "start_time": "2018-08-11T14:18:23.763396Z" + } + }, + "outputs": [], + "source": [ + "from functools import reduce" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.845812Z", - "start_time": "2018-05-11T08:29:18.840698Z" + "end_time": "2018-08-11T14:18:29.166289Z", + "start_time": "2018-08-11T14:18:29.149684Z" } }, "outputs": [], @@ -56,7 +107,7 @@ " - seed: random seed of the initializer\n", " \"\"\"\n", " if initializer is None:\n", - " initializer = tf.glorot_normal_initializer(seed=seed, dtype=dtype)\n", + " initializer = tf.glorot_uniform_initializer(seed=seed, dtype=dtype)\n", " filter_shape = [width, height, in_channels, out_channels]\n", " return tf.Variable(initializer(shape=filter_shape), name=name, dtype=dtype)" ] @@ -66,15 +117,15 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.851889Z", - "start_time": "2018-05-11T08:29:18.847957Z" + "end_time": "2018-08-11T14:18:37.930133Z", + "start_time": "2018-08-11T14:18:37.913825Z" } }, "outputs": [], "source": [ "def get_bias(shape, dtype=tf.float32, name=None, initializer=None, seed=None):\n", " if initializer is None:\n", - " initializer = tf.glorot_normal_initializer(seed=seed, dtype=dtype)\n", + " initializer = tf.glorot_uniform_initializer(seed=seed, dtype=dtype)\n", " return tf.Variable(initializer(shape=shape), name=name, dtype=dtype)" ] }, @@ -83,13 +134,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.860543Z", - "start_time": "2018-05-11T08:29:18.854365Z" + "end_time": "2018-08-11T14:25:40.900334Z", + "start_time": "2018-08-11T14:25:40.878602Z" } }, "outputs": [], "source": [ - "def conv_layer(in_fmap, w_shape, padding='SAME', stride=1, relu=True, name=None):\n", + "def conv_layer(in_fmap, w_shape, padding='SAME', stride=1, act_fun=None, name=None):\n", " width, height, in_channel, out_channel = w_shape\n", " strides = [1, stride, stride, 1]\n", " with tf.name_scope(name, 'conv'):\n", @@ -101,9 +152,9 @@ " bias = get_bias(w_filter.shape.as_list()[-1:],\n", " dtype=in_fmap.dtype,\n", " name='bias')\n", - " act = tf.add(out_fmap, bias, name='activation')\n", - " if relu:\n", - " act = tf.nn.relu(act, name='relu')\n", + " act = tf.add(out_fmap, bias, name='logits')\n", + " if act_fun:\n", + " act = act_fun(act, name='activation')\n", " return act" ] }, @@ -112,24 +163,23 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.867391Z", - "start_time": "2018-05-11T08:29:18.863081Z" + "end_time": "2018-08-11T14:39:13.408404Z", + "start_time": "2018-08-11T14:39:13.391506Z" } }, "outputs": [], "source": [ - "def fc_layer(in_tensor, out_dim, act_func=None, initializer=None, name=None):\n", + "def fc_layer(in_tensor, out_dim, act_fun=None, initializer=None, name=None):\n", " \"\"\"Fully conneted layer\n", " \"\"\"\n", " if 
initializer is None:\n", " initializer = tf.glorot_normal_initializer(dtype=in_tensor.dtype)\n", - " if act_func is None:\n", - " act_func = tf.nn.relu\n", " w_shape = [in_tensor.shape.as_list()[-1], out_dim]\n", " with tf.name_scope(name, 'fully_connect'):\n", " w_fc = tf.Variable(initializer(shape=w_shape, dtype=in_tensor.dtype), name='weight')\n", - " logits = tf.matmul(in_tensor, w_fc, name='logit')\n", - " act = act_func(logits, name='activation')\n", + " act = tf.matmul(in_tensor, w_fc, name='logits')\n", + " if act_fun:\n", + " act = act_fun(act, name='activation')\n", " return act" ] }, @@ -138,13 +188,20 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:18.872275Z", - "start_time": "2018-05-11T08:29:18.869915Z" + "end_time": "2018-08-11T14:25:41.568871Z", + "start_time": "2018-08-11T14:25:41.550930Z" } }, "outputs": [], "source": [ - "from functools import reduce" + "def cross_entropy_loss(logits, labels, name=None, axis=-1):\n", + " '''https://github.com/keras-team/keras/blob/master/keras/backend/tensorflow_backend.py#L3171\n", + " '''\n", + " with tf.name_scope(name, 'cross_entropy'):\n", + " prob = tf.nn.softmax(logits=logits, axis=axis)\n", + " prob = tf.clip_by_value(prob, 1e-7, 1-1e-7)\n", + " loss = tf.reduce_sum(-labels * tf.log(prob), name='total_loss')\n", + " return loss" ] }, { @@ -152,207 +209,67 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:19.288785Z", - "start_time": "2018-05-11T08:29:18.875572Z" + "end_time": "2018-08-11T15:36:48.189400Z", + "start_time": "2018-08-11T15:36:46.688526Z" } }, "outputs": [], "source": [ "graph = tf.Graph()\n", + "\n", "with graph.as_default():\n", - " tf_img_batch = tf.placeholder(tf.float32, \n", - " shape=[None, 32, 32, 3], \n", - " name='img_batch')\n", - " tf_label_batch = tf.placeholder(tf.float32,\n", - " shape=[None, 10],\n", - " name='label_batch')\n", - " relu_1_1 = conv_layer(tf_img_batch, [3, 3, 3, 64], name='conv_1_1')\n", - " relu_1_2 = conv_layer(relu_1_1, [3, 3, 64, 64], name='conv_1_2')\n", - " pool_1 = tf.nn.max_pool(relu_1_2, \n", - " ksize=[1, 2, 2, 1],\n", - " strides=[1, 2, 2, 1], \n", - " padding='SAME',\n", - " name='pool_1')\n", - " relu_2_1 = conv_layer(pool_1, [3, 3, 64, 32], name='conv_2_1')\n", - " relu_2_2 = conv_layer(relu_2_1, [3, 3, 32, 32], name='conv_2_2')\n", - " pool_2 = tf.nn.max_pool(relu_2_2,\n", - " ksize=[1, 2, 2, 1],\n", - " strides=[1, 2, 2, 1],\n", - " padding='SAME',\n", - " name='pool_2')\n", - " relu_3_1 = conv_layer(pool_2, [5, 5, 32, 32], name='conv_3_1')\n", - " relu_3_2 = conv_layer(relu_3_1, [5, 5, 32, 32], name='conv_3_2')\n", - " pool_3 = tf.nn.max_pool(relu_3_2,\n", - " ksize=[1, 2, 2, 1],\n", - " strides=[1, 2, 2, 1],\n", - " padding='SAME',\n", - " name='pool_3')\n", - " N_dim = reduce(lambda x, acc: acc*x, pool_3.shape.as_list()[1:])\n", - " flat_vec = tf.reshape(pool_3, [-1, N_dim], name='input_vec')\n", - " fc_1 = fc_layer(flat_vec, 256, name='fc_1')\n", - " keep_prob_1 = tf.placeholder(tf.float32, name='keep_prob_1')\n", - " dropout_1 = tf.nn.dropout(fc_1, keep_prob=keep_prob_1, name='dropout_1')\n", - " keep_prob_2 = tf.placeholder(tf.float32, name='keep_prob_2')\n", - " fc_2 = fc_layer(dropout_1, 128, name='fc_2')\n", - " dropout_2 = tf.nn.dropout(fc_2, keep_prob=keep_prob_2, name='dropout_2')\n", - " fc_3 = fc_layer(dropout_2, 10, name='fc_3')\n", + " tf_image_batch = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])\n", + " tf_labels = tf.placeholder(tf.float32, shape=[None, 10])\n", 
+ " tf_keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n", " \n", - " pred_label = tf.argmax(fc_3, -1, name='pred_label')\n", + " conv1 = conv_layer(tf_image_batch, [2, 2, 3, 16],\n", + " padding='VALID')\n", + " conv2 = conv_layer(conv1,\n", + " [3, 3, 16, 32],\n", + " padding='VALID',\n", + " act_fun=tf.nn.relu)\n", + " pool1 = tf.nn.max_pool(conv2,\n", + " ksize=[1, 2, 2, 1],\n", + " strides=[1, 2, 2, 1],\n", + " padding='VALID')\n", + " conv3 = conv_layer(pool1,\n", + " [3, 3, 32, 32],\n", + " stride=2,\n", + " padding='VALID')\n", + " conv4 = conv_layer(conv3,\n", + " [3, 3, 32, 32],\n", + " padding='VALID',\n", + " stride=2,\n", + " act_fun=tf.nn.relu)\n", + " drop1 = tf.nn.dropout(conv4, keep_prob=tf_keep_prob)\n", + " pool2 = tf.nn.max_pool(drop1,\n", + " ksize=[1, 2, 2, 1],\n", + " strides=[1, 2, 2, 1],\n", + " padding='VALID')\n", + " conv5 = conv_layer(pool2,\n", + " [1, 1, 32, 64],\n", + " padding='VALID',\n", + " act_fun=tf.nn.relu)\n", + " conv6 = conv_layer(conv5,\n", + " [1, 1, 64, 128],\n", + " act_fun=tf.nn.relu)\n", + " flat_conv6 = tf.reshape(conv6, shape=[-1, reduce(lambda x, y: x*y, conv6.shape.as_list()[1:], 1)])\n", + " fc1 = fc_layer(flat_conv6, 128, act_fun=tf.nn.relu)\n", + " drop_2 = tf.nn.dropout(fc1, keep_prob=tf_keep_prob)\n", + " fc2 = fc_layer(drop_2, 64, act_fun=tf.nn.relu)\n", + " logits = fc_layer(fc2, 10)\n", + " tf_pred = tf.argmax(logits, axis=-1, name='pred')\n", + " total_loss = cross_entropy_loss(logits=logits, labels=tf_labels)\n", " \n", - " with tf.name_scope('Loss'):\n", - " loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_label_batch,\n", - " logits=fc_3,\n", - " name='cross_entropy')\n", - " total_loss = tf.reduce_sum(loss, name='total_cross_entropy')\n", - " train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss, name='train_op') " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:19.605109Z", - "start_time": "2018-05-11T08:29:19.471218Z" - } - }, - "outputs": [], - "source": [ - "!rm -rf ckpt && mkdir -p ckpt/cnn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:20.762941Z", - "start_time": "2018-05-11T08:29:20.527300Z" - } - }, - "outputs": [], - "source": [ - "with tf.Session(graph=graph) as sess:\n", - " tf.global_variables_initializer().run()\n", - " saver = tf.train.Saver()\n", - " ckpt = saver.save(sess, 'ckpt/cnn/model')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:21.667197Z", - "start_time": "2018-05-11T08:29:21.150975Z" - } - }, - "outputs": [], - "source": [ - "graph_def = prepare_meta_graph(ckpt+'.meta', output_nodes=[pred_label.op.name])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:23.556859Z", - "start_time": "2018-05-11T08:29:23.550728Z" - } - }, - "outputs": [], - "source": [ - "with open('test_cnn_float.pb', 'wb') as fid:\n", - " fid.write(graph_def.SerializeToString())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:24.397295Z", - "start_time": "2018-05-11T08:29:24.263998Z" - } - }, - "outputs": [], - "source": [ - "!rm -rf logs && mkdir logs" + " train_op = tf.train.AdadeltaOptimizer(learning_rate=1.0, epsilon=1e-7).minimize(total_loss)\n", + " saver = tf.train.Saver(max_to_keep=None)" ] 
}, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:26.194834Z", - "start_time": "2018-05-11T08:29:26.073214Z" - } - }, - "outputs": [], - "source": [ - "tf.summary.FileWriter(logdir='logs/ori_graph', graph=graph).close()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:27.196382Z", - "start_time": "2018-05-11T08:29:27.183400Z" - } - }, - "outputs": [], - "source": [ - "ckpt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:28.168627Z", - "start_time": "2018-05-11T08:29:28.165524Z" - } - }, - "outputs": [], - "source": [ - "meta_path = ckpt + '.meta'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:38.029464Z", - "start_time": "2018-05-11T08:29:38.022860Z" - } - }, - "outputs": [], - "source": [ - "for node in graph_def.node:\n", - " print(node.name)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "ExecuteTime": { - "end_time": "2018-05-11T08:29:41.572095Z", - "start_time": "2018-05-11T08:29:41.430967Z" - } - }, - "outputs": [], + "cell_type": "markdown", + "metadata": {}, "source": [ - "trans_graph_def = TransformGraph(input_graph_def=graph_def,\n", - " inputs=[],\n", - " outputs=[pred_label.op.name],\n", - " transforms=[\"quantize_weights\", \"quantize_nodes\"])" + "# Train" ] }, { @@ -360,16 +277,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:29:55.877408Z", - "start_time": "2018-05-11T08:29:55.608587Z" + "end_time": "2018-08-11T15:36:49.480776Z", + "start_time": "2018-08-11T15:36:49.466199Z" } }, "outputs": [], "source": [ - "new_graph = tf.Graph()\n", - "with new_graph.as_default():\n", - " tf.import_graph_def(trans_graph_def, name='')\n", - "tf.summary.FileWriter(logdir='logs/quant_graph', graph=new_graph).close()" + "from cifar import read_data_sets" ] }, { @@ -377,14 +291,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-11T08:30:04.206427Z", - "start_time": "2018-05-11T08:30:04.194329Z" + "end_time": "2018-08-11T15:36:49.945291Z", + "start_time": "2018-08-11T15:36:49.939798Z" } }, "outputs": [], "source": [ - "with open('test_cnn.pb', 'wb') as fid:\n", - " fid.write(trans_graph_def.SerializeToString())" + "from tensorflow.keras.preprocessing.image import ImageDataGenerator" ] }, { @@ -392,13 +305,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T14:53:22.805168Z", - "start_time": "2018-05-05T14:53:22.764063Z" + "end_time": "2018-08-11T15:36:50.346772Z", + "start_time": "2018-08-11T15:36:50.337700Z" } }, "outputs": [], "source": [ - "from utensor_cgen.operators import OperatorFactory" + "import sys" ] }, { @@ -406,18 +319,15 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T14:53:24.669087Z", - "start_time": "2018-05-05T14:53:24.658360Z" + "end_time": "2018-08-11T15:53:39.127171Z", + "start_time": "2018-08-11T15:53:39.115280Z" } }, "outputs": [], "source": [ - "for n in new_graph_def.node:\n", - " if n.op not in ['Const', 'Placeholder'] and \\\n", - " n.op not in OperatorFactory._operators:\n", - " print(n.name, n.op)\n", - " if n.op == 'QuantizedReshape':\n", - " node = n" + "batch_size = 50\n", + "num_iter_per_epoch = 1500\n", + "num_epoch = 10" ] }, { @@ -425,13 +335,57 @@ 
"execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T06:26:47.743514Z", - "start_time": "2018-05-05T06:26:47.737707Z" + "end_time": "2018-08-11T17:24:26.761950Z", + "start_time": "2018-08-11T16:02:43.432175Z" } }, "outputs": [], "source": [ - "type(node)" + "!rm -rf ckpt && mkdir -p ckpt/cnn\n", + "\n", + "# this will takes long to complete if running on CPU\n", + "cifar = read_data_sets('./data', one_hot=True, reshape=False)\n", + "img_gen = ImageDataGenerator(width_shift_range=0.1,\n", + " height_shift_range=0.1,\n", + " horizontal_flip=True)\n", + "img_gen.fit(cifar.train.images)\n", + "batch_gen = img_gen.flow(cifar.train.images,\n", + " cifar.train.labels,\n", + " batch_size=batch_size)\n", + "\n", + "with tf.Session(graph=graph) as sess:\n", + " tf.global_variables_initializer().run()\n", + " # compute original loss\n", + " l, p_labels = sess.run([total_loss, tf_pred],\n", + " feed_dict={tf_image_batch: cifar.test.images,\n", + " tf_labels: cifar.test.labels,\n", + " tf_keep_prob: 1.0})\n", + " l /= cifar.test.images.shape[0]\n", + " acc = (p_labels == np.argmax(cifar.test.labels, axis=-1)).mean()\n", + " print(f'original loss: {l}')\n", + " print(f'acc on test set: {acc*100:.2f}%')\n", + " \n", + " best_loss = float('inf')\n", + " for epoch in range(num_epoch):\n", + " print(f'epoch {epoch} start')\n", + " for _ in range(num_iter_per_epoch):\n", + " images_batch, labels_batch = next(batch_gen)\n", + " _ = sess.run(train_op,\n", + " feed_dict={tf_image_batch: images_batch,\n", + " tf_labels: labels_batch,\n", + " tf_keep_prob: 0.9})\n", + " test_loss, p_labels = sess.run([total_loss, tf_pred],\n", + " feed_dict={tf_image_batch: cifar.test.images,\n", + " tf_labels: cifar.test.labels,\n", + " tf_keep_prob: 1.0})\n", + " test_loss /= cifar.test.images.shape[0]\n", + " acc = (p_labels == np.argmax(cifar.test.labels, axis=-1)).mean()\n", + " print(f'test loss: {test_loss}, {acc*100:0.2f}%')\n", + " ckpt = saver.save(sess, 'ckpt/cnn/model', global_step=epoch)\n", + " if test_loss < best_loss:\n", + " best_loss = test_loss\n", + " best_ckpt = ckpt\n", + " print(f'epoch saved {ckpt}')" ] }, { @@ -439,13 +393,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T06:27:41.227498Z", - "start_time": "2018-05-05T06:27:41.221950Z" + "end_time": "2018-08-11T17:24:26.816351Z", + "start_time": "2018-08-11T17:24:26.801871Z" } }, "outputs": [], "source": [ - "node.input" + "best_ckpt" ] }, { @@ -453,13 +407,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T06:26:49.384225Z", - "start_time": "2018-05-05T06:26:49.379036Z" + "end_time": "2018-08-11T17:24:27.081511Z", + "start_time": "2018-08-11T17:24:26.820178Z" } }, "outputs": [], "source": [ - "list(node.attr.keys())" + "!tree ckpt" ] }, { @@ -467,13 +421,13 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T06:26:50.322745Z", - "start_time": "2018-05-05T06:26:50.316600Z" + "end_time": "2018-08-11T17:28:47.469952Z", + "start_time": "2018-08-11T17:28:46.383686Z" } }, "outputs": [], "source": [ - "node.attr['T']" + "graph_def = prepare_meta_graph(best_ckpt+'.meta', output_nodes=[tf_pred.op.name])" ] }, { @@ -481,13 +435,14 @@ "execution_count": null, "metadata": { "ExecuteTime": { - "end_time": "2018-05-05T06:26:55.734316Z", - "start_time": "2018-05-05T06:26:55.729449Z" + "end_time": "2018-08-11T17:28:48.054678Z", + "start_time": "2018-08-11T17:28:48.047511Z" } }, "outputs": [], "source": [ - "node.op" + "with 
open('cifar10_cnn.pb', 'wb') as fid:\n", + " fid.write(graph_def.SerializeToString())" ] }, { @@ -500,9 +455,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "utensor", "language": "python", - "name": "python3" + "name": "utensor" }, "language_info": { "codemirror_mode": { @@ -514,17 +469,30 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.6.2" }, "toc": { + "colors": { + "hover_highlight": "#DAA520", + "navigate_num": "#000000", + "navigate_text": "#333333", + "running_highlight": "#FF0000", + "selected_highlight": "#FFD700", + "sidebar_border": "#EEEEEE", + "wrapper_background": "#FFFFFF" + }, + "moveMenuLeft": true, "nav_menu": {}, + "navigate_menu": true, "number_sections": true, "sideBar": true, "skip_h1_title": false, + "threshold": 4, "toc_cell": false, "toc_position": {}, "toc_section_display": "block", - "toc_window_display": false + "toc_window_display": false, + "widenNotebook": false } }, "nbformat": 4, diff --git a/tests/deep_cnn/test_cnn_float.pb b/tests/deep_cnn/test_cnn_float.pb deleted file mode 100644 index a93c06d6..00000000 Binary files a/tests/deep_cnn/test_cnn_float.pb and /dev/null differ diff --git a/tests/test_ir/test_uTensorGraph/test_graph.py b/tests/test_ir/test_uTensorGraph/test_graph.py index eb3e8f3a..ca67735f 100644 --- a/tests/test_ir/test_uTensorGraph/test_graph.py +++ b/tests/test_ir/test_uTensorGraph/test_graph.py @@ -1,11 +1,12 @@ from copy import deepcopy -import tensorflow as tf import numpy as np +import tensorflow as tf -from utensor_cgen.ir import uTensorGraph, OperationInfo +from utensor_cgen.ir import OperationInfo, uTensorGraph from utensor_cgen.ir.converter import TensorProtoConverter + def test_ugraph_topo_order(graph_tuple): graph_def, output_nodes = graph_tuple ugraph = uTensorGraph(graph_def, @@ -35,13 +36,24 @@ def test_op_info(): op_type='no_op', backend='tensorflow', op_attr={ - '_to_skip': [1, 2, 3], - '_skip_this_too': None, + '_utensor_to_skip': [1, 2, 3], + '_utensor_skip_this_too': None, 'tensor_no_skip': t_proto }) - assert op_info.op_attr.get('_to_skip', None) == [1, 2, 3] - assert op_info.op_attr.get('_skip_this_too') is None + assert op_info.op_attr.get('_utensor_to_skip', None) == [1, 2, 3] + assert op_info.op_attr.get('_utensor_skip_this_too') is None generic_tensor = op_info.op_attr.get('tensor_no_skip') assert isinstance(generic_tensor, TensorProtoConverter.__utensor_generic_type__) assert (generic_tensor.np_array == np_array).all() + +def test_in_out_nodes(graph_tuple): + ugraph = uTensorGraph(*graph_tuple) + x3 = ugraph.ops_info['x3'] + assert x3.ugraph is ugraph + assert len(x3.input_nodes) == len(set([op.name for op in x3.input_nodes])) + assert all([str(op.name) in ['x2', 'bias2'] for op in x3.input_nodes]) + assert x3.output_nodes == [] + + x2 = ugraph.ops_info['x2'] + assert [str(op.name) for op in x2.output_nodes] == ['x3'] diff --git a/tests/test_transformer/test_dropout/test_dropout_transormer.py b/tests/test_transformer/test_dropout/test_dropout_transormer.py index ad0f6721..f14f610e 100644 --- a/tests/test_transformer/test_dropout/test_dropout_transormer.py +++ b/tests/test_transformer/test_dropout/test_dropout_transormer.py @@ -10,6 +10,10 @@ def test_dropout_trans(droput_graph_tuple): ugraph = uTensorGraph(graph_def, output_nodes=output_nodes) transformer = DropoutTransformer() new_ugraph = transformer.transform(ugraph) + for op in new_ugraph.ops_info.values(): + assert op.ugraph + out_op = 
new_ugraph.ops_info[output_nodes[0]] + assert set([str(op.name) for op in out_op.input_nodes]) == set(['x', 'bias']) # all dropout nodes should be gone graph_1 = tf.Graph() graph_2 = tf.Graph() diff --git a/tests/test_transformer/test_inline/__init__.py b/tests/test_transformer/test_inline/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_transformer/test_inline/conftest.py b/tests/test_transformer/test_inline/conftest.py new file mode 100644 index 00000000..325d0da5 --- /dev/null +++ b/tests/test_transformer/test_inline/conftest.py @@ -0,0 +1,18 @@ +import tensorflow as tf +import pytest + +@pytest.fixture(scope='session', name='inlinegraph_tuple') +def refgraph(): + graph = tf.Graph() + with graph.as_default(): + x = tf.constant(1, name='x', dtype=tf.float32) + y = tf.constant(1, name='y', dtype=tf.float32) + z = tf.add(x, y, name='z') + w = tf.add(x, 2.0, name='w') + k = tf.add(z, w, name='k') + inline_ans = { + x.op.name : 'inline', + y.op.name : 'inline', + } + + return graph.as_graph_def(), inline_ans, [k.op.name] diff --git a/tests/test_transformer/test_inline/test_inline_optimizer.py b/tests/test_transformer/test_inline/test_inline_optimizer.py new file mode 100644 index 00000000..7803acb3 --- /dev/null +++ b/tests/test_transformer/test_inline/test_inline_optimizer.py @@ -0,0 +1,12 @@ +from utensor_cgen.transformer import InlineTransformer +from utensor_cgen.ir import uTensorGraph + +def test_inline_optimizer(inlinegraph_tuple): + (graph_def, inline_ans, output_nodes)= inlinegraph_tuple + ugraph = uTensorGraph(graph_def, output_nodes) + transformer = InlineTransformer() + ugraph = transformer.transform(ugraph) + for node_name in ugraph.topo_order: + if node_name in inline_ans: + op_type = ugraph.ops_info[node_name].op_type + assert op_type == 'Inline' \ No newline at end of file diff --git a/utensor_cgen/__main__.py b/utensor_cgen/__main__.py index 347fff08..98a70be3 100644 --- a/utensor_cgen/__main__.py +++ b/utensor_cgen/__main__.py @@ -1,83 +1,5 @@ # -*- coding:utf8 -*- -# pylint: disable=C0301 -import argparse -import os -import pkg_resources - - -def _get_pb_model_name(path): - return os.path.basename(os.path.splitext(path)[0]) - - -def main(pb_file, src_fname, idx_dir, embed_data_dir, - debug_cmt, output_nodes, trans_methods, model_dir): - if pb_file is None: - raise ValueError("No pb file given") - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - # MODEL should default to pb_file - if idx_dir is None: - idx_dir = os.path.join("constants", _get_pb_model_name(pb_file)) - - if src_fname is None: - src_fname = _get_pb_model_name(pb_file) + ".cpp" - model_path = os.path.join(model_dir, src_fname) - - from .code_generator import CodeGenerator - - if embed_data_dir is None: - embed_data_dir = os.path.join("/fs", idx_dir) - # TODO: pass transformation kwargs to codegenerator (better argument parser) - generator = CodeGenerator(pb_file, idx_dir, embed_data_dir, trans_methods, output_nodes, debug_cmt) - generator.generate(model_path) - - -def _nargs(sep=','): - def parser(argstr): - return argstr.split(sep) - return parser - - -def _build_parser(): - pkg_version = pkg_resources.get_distribution('utensor_cgen').version - parser = argparse.ArgumentParser() - parser.add_argument("pb_file", metavar='MODEL.pb', - help="input protobuf file") - parser.add_argument("-d", "--data-dir", dest='idx_dir', - metavar="DIR", - help="ouptut directory for tensor data idx files (defaults to protobuf name, e.g.: constants/my_model)") - 
parser.add_argument("-m", "--model-dir", dest='model_dir', - metavar="DIR", default="models", - help="ouptut directory for tensor data idx files (default: %(default)s)") - parser.add_argument("-o", "--output", dest="src_fname", - metavar="FILE.cpp", - help="output source file name, header file will be named accordingly. (defaults to protobuf name, e.g.: my_model.cpp)") - parser.add_argument("-D", "--embed-data-dir", dest="embed_data_dir", - metavar="EMBED_DIR", default=None, - help="the data dir on the develop board (default: the value as the value of -d/data-dir flag)") - parser.add_argument("--output-nodes", dest="output_nodes", - type=_nargs(), metavar="NODE_NAME,NODE_NAME,...", - required=True, - help="list of output nodes (required)") - parser.add_argument("-O", "--transform-methods", dest='trans_methods', - type=_nargs(), default='dropout,quantize,refcnt', - help='optimization methods (default: %(default)s)', - metavar='METHOD,METHOD,...') - parser.add_argument("--debug-comment", dest="debug_cmt", - action="store_true", - help="Add debug comments in the output source file (default: %(default)s)") - parser.add_argument("-v", "--version", action="version", - version="utensor-cli {}".format(pkg_version), - help="show version") - return parser - - -def cli(): - parser = _build_parser() - args = vars(parser.parse_args()) - main(**args) - +from utensor_cgen.cli import cli if __name__ == "__main__": cli() diff --git a/utensor_cgen/cli.py b/utensor_cgen/cli.py new file mode 100644 index 00000000..09413d39 --- /dev/null +++ b/utensor_cgen/cli.py @@ -0,0 +1,94 @@ +#-*- coding:utf8 -*- +import argparse +import os + +import pkg_resources + +import click +from .utils import NArgsParam + + +def _get_pb_model_name(path): + return os.path.basename(os.path.splitext(path)[0]) + +@click.group(name='utensor-cli') +@click.help_option('-h', '--help') +@click.version_option((pkg_resources + .get_distribution('utensor_cgen') + .version), + '-V', '--version') +def cli(): + pass + + +@cli.command(name='convert', help='convert graph to cpp/hpp files') +@click.help_option('-h', '--help') +@click.argument('pb_file', required=True, metavar='MODEL.pb') +@click.option('-o', '--output', + metavar="FILE.cpp", + help="output source file name, header file will be named accordingly. 
(defaults to protobuf name, e.g.: my_model.cpp)") +@click.option('-d', '--data-dir', + metavar='DIR', + help="output directory for tensor data idx files", + show_default=True) +@click.option('-D', '--embed-data-dir', + metavar='EMBED_DIR', + help=("the data dir on the develop board " + "(default: the value as the value of -d/data-dir flag)")) +@click.option('--debug-comment', + is_flag=True, + help="Add debug comments in the output source file", + show_default=True) +@click.option("--output-nodes", + type=NArgsParam(), + metavar="NODE_NAME,NODE_NAME,...", + required=True, + help="list of output nodes") +@click.option("--transform-methods", + type=NArgsParam(), + default='dropout,quantize,refcnt,inline', + help='optimization methods', + metavar='METHOD,METHOD,...', + show_default=True) +@click.option("-m", "--model-dir", + metavar="DIR", + default="models", + help="output directory for the generated source files", + show_default=True) +def convet_graph(pb_file, output, data_dir, embed_data_dir, + debug_comment, output_nodes, transform_methods, model_dir): + from utensor_cgen.code_generator import CodeGenerator + + if pb_file is None: + raise ValueError("No pb file given") + + if not os.path.exists(model_dir): + os.makedirs(model_dir) + # MODEL should default to pb_file + if data_dir is None: + data_dir = os.path.join("constants", _get_pb_model_name(pb_file)) + + if output is None: + output = _get_pb_model_name(pb_file) + ".cpp" + model_path = os.path.join(model_dir, output) + + if embed_data_dir is None: + embed_data_dir = os.path.join("/fs", data_dir) + # TODO: pass transformation kwargs to codegenerator (better argument parser) + generator = CodeGenerator(pb_file, data_dir, embed_data_dir, transform_methods, output_nodes, debug_comment) + generator.generate(model_path) + + +@cli.command(name='show', help='show node names in the pb file') +@click.help_option('-h', '--help') +@click.argument('pb_file', required=True, metavar='MODEL.pb') +def show_pb_file(pb_file): + import tensorflow as tf + graph_def = tf.GraphDef() + with open(pb_file, 'rb') as fid: + graph_def.ParseFromString(fid.read()) + for node in graph_def.node: + print(node.name) + +if __name__ == '__main__': + cli() diff --git a/utensor_cgen/code_generator.py b/utensor_cgen/code_generator.py index d488579b..64c6704b 100644 --- a/utensor_cgen/code_generator.py +++ b/utensor_cgen/code_generator.py @@ -13,7 +13,8 @@ from .transformer.optimizer import RefCntOptimizer from .ir import uTensorGraph from .snippets import (CommentSnippet, ContextHeaderSnippet, - ContextSnippetsContainer, CreateTensorIdxSnippet) + ContextSnippetsContainer, CreateTensorIdxSnippet, + CreateTensorBinarySnippet, ContextGlobalArrayContainer) from .snippets.composer import Composer from .utils import NamescopedKWArgsParser @@ -51,12 +52,14 @@ def _generate_from_pb(self, src_fname): fname, _ = os.path.splitext(src_fname) graph_name, _ = os.path.splitext(os.path.basename(self.model_file)) guard_name = fname.replace('/', '_') + weightheader_fname = '{}_weight.hpp'.format(fname) header_snippet = ContextHeaderSnippet(guard_name, graph_name) - + weight_container = ContextGlobalArrayContainer() composer = Composer() header_fname = '{}.hpp'.format(fname) header_name = os.path.basename(header_fname) - container = ContextSnippetsContainer(graph_name, header_name) + weightheader_name = os.path.basename(weightheader_fname) + container = ContextSnippetsContainer(graph_name, header_name, weightheader_name) opFactory = OperatorFactory() @@ -82,9 +85,12 @@ def _generate_from_pb(self, 
src_fname): container.template_vars["ref_counts"].append(ref_count) header_snippet.template_vars["placeholders"].append(out_tname) else: + # TODO: the operator may correspond to multiple snippets (such as InlineTensor) + # weight_container is passed in as a workaround snippet = opFactory.createOperatorSnippet(op_info, idx_dir=self.idx_dir, - embeded_data_dir=self.embed_data_dir) + embed_data_dir=self.embed_data_dir, + weight_container=weight_container) container.add_snippet(snippet) if self.debug_cmt: @@ -94,6 +100,14 @@ def _generate_from_pb(self, src_fname): container.add_snippet(cmt_snippet) composer.add_snippet(container) + if 'inline' in self.trans_methods: + _logger.info("Generate weight file: %s", weightheader_fname) + with open(weightheader_fname, "w") as wf: + wf.write('// Auto generated by utensor-cli\n\n') + wf.write(weight_container.render()) + else: + container.remove_header('"{}"'.format(weightheader_name)) + _logger.info("Generate header file: %s", header_fname) with open(header_fname, "w") as wf: wf.write('// Auto generated by utensor-cli\n\n') diff --git a/utensor_cgen/ir/base.py b/utensor_cgen/ir/base.py index da8464fe..4402b2c1 100644 --- a/utensor_cgen/ir/base.py +++ b/utensor_cgen/ir/base.py @@ -1,22 +1,23 @@ # -*- coding: utf8 -*- import re +from collections import defaultdict from copy import deepcopy -import six - import attr import numpy as np +import six import tensorflow as tf from attr.validators import instance_of from tensorflow.contrib.util import make_ndarray from tensorflow.core.framework.attr_value_pb2 import AttrValue as _AttrValue -from tensorflow.core.framework.attr_value_pb2 import ( - NameAttrList as _NameAttrList) +from tensorflow.core.framework.attr_value_pb2 import \ + NameAttrList as _NameAttrList from tensorflow.core.framework.tensor_pb2 import TensorProto as _TensorProto -from tensorflow.core.framework.tensor_shape_pb2 import ( - TensorShapeProto as _TensorShapeProto) +from tensorflow.core.framework.tensor_shape_pb2 import \ + TensorShapeProto as _TensorShapeProto from tensorflow.core.framework.types_pb2 import DataType as _DataType from tensorflow.tools.graph_transforms import TransformGraph + from utensor_cgen.utils import parse_tensor_name from .converter import AttrValueConverter, ConverterFactory @@ -27,7 +28,7 @@ class _NoShallowCopyMixin(object): def __copy__(self): - raise NotImplementedError('shallow copy is not allowed for type %s' % type(self)) + raise RuntimeError('shallow copy is not allowed for type %s' % type(self)) class IRBase(object): @@ -45,6 +46,7 @@ class TensorInfo(IRBase, _NoShallowCopyMixin): shape : list """ name = attr.ib(validator=instance_of(six.text_type)) + op_name = attr.ib(validator=instance_of(six.text_type)) dtype = attr.ib(validator=instance_of(np.dtype)) shape = attr.ib(validator=instance_of((list, type(None)))) @shape.validator @@ -56,16 +58,19 @@ def check(self, attrib, shape_values): def __deepcopy__(self, memo): return TensorInfo(name=self.name, + op_name=self.op_name, dtype=self.dtype, shape=deepcopy(self.shape, memo)) -@attr.s +# @attr.s class OperationInfo(IRBase, _NoShallowCopyMixin): """ name : str input_tensors : List[TensorInfo] output_tensors : List[TensorInfo] + input_nodes : Set[OperationInfo] + output_nodes : Set[OperationInfo] op_type : str backend : str {"tensorflow", 'pytorch'(future work)} op_attr : dict @@ -100,9 +105,28 @@ def check(self, attribute, value): raise ValueError('Unsupported backend: {}'.format(value)) op_attr = attr.ib(factory=dict, converter=dict) + + # ugraph = 
attr.ib(default=None, init=False) + @property + def input_nodes(self): + in_ops = [] + for tensor in self.input_tensors: + if tensor.op_name not in in_ops: + in_ops.append(tensor.op_name) + return [self.ugraph.ops_info[name] for name in in_ops] + + @property + def output_nodes(self): + out_ops = [] + for op in self.ugraph.ops: + for in_tensor in op.input_tensors: + if in_tensor.op_name == self.name and op.name not in out_ops: + out_ops.append(op.name) + break + return [self.ugraph.ops_info[name] for name in out_ops] def __attrs_post_init__(self): - skip_pattern = re.compile(r'_[^_]*') + skip_pattern = re.compile(r'_utensor_[^_]*') if self.op_attr: op_attr = {} for k, v in self.op_attr.items(): @@ -112,14 +136,18 @@ def __attrs_post_init__(self): else: op_attr[k] = ConverterFactory.get_generic_value(v) self.op_attr = op_attr + if self.ugraph is None: + raise ValueError('ugraph is not set properly') def __deepcopy__(self, memo): - return OperationInfo(name=self.name, - input_tensors=deepcopy(self.input_tensors, memo), - output_tensors=deepcopy(self.output_tensors, memo), - op_type=self.op_type, - backend=self.backend, - op_attr=deepcopy(self.op_attr, memo)) + op_info = OperationInfo(name=self.name, + input_tensors=deepcopy(self.input_tensors, memo), + output_tensors=deepcopy(self.output_tensors, memo), + op_type=self.op_type, + backend=self.backend, + op_attr=deepcopy(self.op_attr, memo), + ugraph=self.ugraph) + return op_info class uTensorGraph(IRBase, _NoShallowCopyMixin): @@ -210,17 +238,17 @@ def visit(node_name): visit(node_name) return ops_torder + # tensorflow @staticmethod - def _parse_tf_tshape(t_shape): + def _tf_parse_tshape(t_shape): try: shape = t_shape.as_list() except ValueError: shape = None return shape - # tensorflow def _init_from_graph_def(self, graph_def, output_nodes): - """Tensorflow + """Initialize graph with Tensorflow GraphDef """ if not self._tf_is_freeze_graph(graph_def): raise ValueError('Given graph_def is not freezed') @@ -238,12 +266,14 @@ def _init_from_graph_def(self, graph_def, output_nodes): for node in graph_def.node: op = graph.get_operation_by_name(node.name) in_tensors = [TensorInfo(name=tensor.name, + op_name=tensor.op.name, dtype=np.dtype(tensor.dtype.as_numpy_dtype), - shape=self._parse_tf_tshape(tensor.shape)) + shape=self._tf_parse_tshape(tensor.shape)) for tensor in op.inputs] out_tensors = [TensorInfo(name=tensor.name, + op_name=op.name, dtype=np.dtype(tensor.dtype.as_numpy_dtype), - shape=self._parse_tf_tshape(tensor.shape)) + shape=self._tf_parse_tshape(tensor.shape)) for tensor in op.outputs] op_type = node.op op_attr = node.attr @@ -252,7 +282,8 @@ def _init_from_graph_def(self, graph_def, output_nodes): output_tensors=out_tensors, op_type=op_type, backend='tensorflow', - op_attr=op_attr) + op_attr=op_attr, + ugraph=self) op_info.op_attr['tensorflow__device'] = node.device self.ops_info[node.name] = op_info self.topo_order.append(node.name) @@ -271,3 +302,7 @@ def __deepcopy__(self, memo): new_graph.output_nodes = self.output_nodes new_graph._backend = self._backend return new_graph + +OperationInfo.ugraph = attr.ib(default=None, + validator=instance_of((uTensorGraph, type(None)))) +OperationInfo = attr.s(OperationInfo) diff --git a/utensor_cgen/operators.py b/utensor_cgen/operators.py index 5fe4a1bc..50dff163 100644 --- a/utensor_cgen/operators.py +++ b/utensor_cgen/operators.py @@ -252,6 +252,41 @@ def __init__(self, op_info, **kwargs): in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtypes=out_dtypes, ref_counts=ref_counts, 
to_eval=to_eval) +class _InlineOperator(_Operator): + + def __init__(self, op_info, **kwargs): + out_tensor_info = op_info.output_tensors[0] + out_tname, out_dtype, tensor_shape = (out_tensor_info.name, + out_tensor_info.dtype, + out_tensor_info.shape) + parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, + op_info.op_attr) + ref_count = parser.get('ref_counts', [0])[0] + pre_tname = self._prepare_tensor_name(out_tname) + inline_tname = self._prepare_inline_array_name(out_tname) + value = op_info.op_attr['value'].value.np_array.flatten() + self._snippet = CreateTensorBinarySnippet(out_tname, tensor_shape=tensor_shape, + tf_dtype=out_dtype, + sptr_name=pre_tname, + inline_name=inline_tname, + ref_count=ref_count) + + weight_snippet = WeightSnippet(inline_tname, + out_dtype, + tensor_shape, + value) + weight_container = kwargs['weight_container'] + weight_container.add_snippet(weight_snippet) + + def _prepare_tensor_name(self, tensor_name): + prepared = tensor_name.replace(":", "_").replace("/", "_") + return prepared + + def _prepare_inline_array_name(self, tensor_name): + inline = tensor_name.replace(":", "_").replace("/", "_") + prepared = "inline_{}".format(inline) + return prepared + class _ConstOperator(_Operator): def __init__(self, op_info, **kwargs): @@ -305,7 +340,8 @@ class OperatorFactory(): "Reshape": _ReshapeOperator, "QuantizedReshape": _QuantizedReshapeOperator, "QuantizedConv2D": _Conv2DOperator, - "Const": _ConstOperator} + "Const": _ConstOperator, + "Inline": _InlineOperator} def createOperatorSnippet(self, op_info, **kwargs): op_type = op_info.op_type diff --git a/utensor_cgen/snippets/_snippets.py b/utensor_cgen/snippets/_snippets.py index 96c71a97..beee74fa 100644 --- a/utensor_cgen/snippets/_snippets.py +++ b/utensor_cgen/snippets/_snippets.py @@ -14,7 +14,9 @@ "Conv2DOpSnippent", "RequantizationRangeOpSnippet", "RequantizeOpSnippet", "CommentSnippet", "ContextHeaderSnippet", - "ContextSnippetsContainer", "QuantizedAddOpSnippet"] + "ContextSnippetsContainer", "QuantizedAddOpSnippet", + "CreateTensorBinarySnippet", "WeightSnippet", + "ContextGlobalArrayContainer"] # TODO: Better abstraction, i.e a better backend for code generation class CreateTensorIdxSnippet(Snippet): @@ -47,6 +49,39 @@ def __init__(self, data_dir, tensor_name, np_dtype, self.template_vars["importer_dtype"] = NP_TYPES_MAP[np_dtype].importer_type_str self.template_vars["to_eval"] = to_eval +class CreateTensorBinarySnippet(Snippet): + __template_name__ = "snippets/create_tensor_binary.cpp" + __headers__ = set(['"uTensor/core/context.hpp"', + '"uTensor/core/tensor.hpp"']) + + def __init__(self, tensor_name, tf_dtype, tensor_shape=None, + ref_count=0, + sptr_name=None, + inline_name=None, + create_sptr=False, + to_eval=False): + if create_sptr and sptr_name is None: + raise ValueError("sptr_name can't be None if create_sptr is True") + if tf_dtype not in NP_TYPES_MAP: + raise ValueError("unsupported data type in uTensor: {}".format(tf_dtype)) + Snippet.__init__(self) + if ref_count: + self.template_vars["ref_count"] = ref_count + if create_sptr: + self.template_vars["create_sptr"] = create_sptr + self.template_vars["sptr_name"] = sptr_name + self.template_vars["tensor_type"] = "BinaryTensor" + self.template_vars["tensor_name"] = tensor_name + self.template_vars["tensor_shape"] = self._to_shape_str(tensor_shape) + self.template_vars["tensor_length"] = np.prod(tensor_shape) + self.template_vars["dtype"] = NP_TYPES_MAP[tf_dtype].tensor_type_str + self.template_vars["to_eval"] = to_eval + 
self.template_vars["inline_name"] = inline_name + + def _to_shape_str(self, shape): + shape_str = ",".join([str(dim) for dim in shape]) + return "{" + shape_str + "}" + class CreateTensorNewSnippet(Snippet): __template_name__ = "snippets/create_tensor_new.cpp" @@ -428,13 +463,33 @@ def __init__(self, guard_name, graph_name, placeholders=None): self.template_vars["graph_name"] = graph_name self.template_vars["placeholders"] = placeholders +class WeightSnippet(Snippet): + __template_name__ = "snippets/weight_snippet.hpp" + __headers__ = set([]) + + def __init__(self, inline_name, type, shape, value): + Snippet.__init__(self) + length = np.prod(shape) + self.template_vars['type'] = NP_TYPES_MAP[type].tensor_type_str + self.template_vars['value'] = value + self.template_vars['length'] = int(length) + self.template_vars['inline_name'] = inline_name + + +class ContextGlobalArrayContainer(SnippetContainerBase): + __template_name__ = "containers/weight_header.hpp" + __headers__ = set([]) + + def __init__(self, snippets=None): + SnippetContainerBase.__init__(self, snippets) + class ContextSnippetsContainer(SnippetContainerBase): __template_name__ = "containers/get_ctx.cpp" __headers__ = set([]) def __init__(self, - graph_name, ctx_header_name, + graph_name, ctx_header_name, ctx_weightheader_name, snippets=None, placeholders=None, ref_counts=None): SnippetContainerBase.__init__(self, snippets) if placeholders is None: @@ -449,3 +504,4 @@ def __init__(self, self.template_vars["placeholders"] = placeholders self.template_vars["ref_counts"] = ref_counts self.add_header('"{}"'.format(ctx_header_name)) + self.add_header('"{}"'.format(ctx_weightheader_name)) diff --git a/utensor_cgen/snippets/templates/containers/weight_header.hpp b/utensor_cgen/snippets/templates/containers/weight_header.hpp new file mode 100644 index 00000000..e1d89bcb --- /dev/null +++ b/utensor_cgen/snippets/templates/containers/weight_header.hpp @@ -0,0 +1,4 @@ +{% for snippet in snippets%} +{{snippet.render()}} +{% endfor %} + diff --git a/utensor_cgen/snippets/templates/snippets/create_tensor_binary.cpp b/utensor_cgen/snippets/templates/snippets/create_tensor_binary.cpp new file mode 100644 index 00000000..42b2f5bc --- /dev/null +++ b/utensor_cgen/snippets/templates/snippets/create_tensor_binary.cpp @@ -0,0 +1,19 @@ +{% if create_sptr %} +S_TENSOR {{sptr_name}}; +{% endif %} +{ + {%if ref_count%} + ctx.add(new {{tensor_type}}<{{dtype}}>({{tensor_shape}}, {{inline_name}}), + "{{tensor_name}}", + {{ref_count}}); + {% else %} + ctx.add(new {{tensor_type}}<{{dtype}}>({{tensor_shape}}, {{inline_name}}), + "{{tensor_name}}"); + {%endif%} + {% if create_sptr %} + {{sptr_name}} = ctx.get("{{tensor_name}}"); + {% endif %} + {%if to_eval%} + ctx.eval(); + {%endif%} +} diff --git a/utensor_cgen/snippets/templates/snippets/create_tensor_new.cpp b/utensor_cgen/snippets/templates/snippets/create_tensor_new.cpp index 17583adf..c00b2c0f 100644 --- a/utensor_cgen/snippets/templates/snippets/create_tensor_new.cpp +++ b/utensor_cgen/snippets/templates/snippets/create_tensor_new.cpp @@ -2,7 +2,7 @@ S_TENSOR {{sptr_name}}; {% endif %} { - ctx.add(new {{tensor_type}}<{{dtype}}>({% if tensor_shape %}{{tensor_shape}}{%endif%}), "{{tensor_name}}", {%if ref_count%}{{ref_count}}{%endif%}); + ctx.add(new {{tensor_type}}<{{dtype}}>({% if tensor_shape %}{{tensor_shape}}{%endif%}), "{{tensor_name}}"{%if ref_count%}, {{ref_count}}{%endif%}); {% if create_sptr %} {{sptr_name}} = ctx.get("{{tensor_name}}"); {% endif %} diff --git 
a/utensor_cgen/snippets/templates/snippets/get_ctx.hpp b/utensor_cgen/snippets/templates/snippets/get_ctx.hpp index 1a1fff1e..5338dd70 100644 --- a/utensor_cgen/snippets/templates/snippets/get_ctx.hpp +++ b/utensor_cgen/snippets/templates/snippets/get_ctx.hpp @@ -6,4 +6,4 @@ void get_{{graph_name}}_ctx(Context& ctx, {%for ph in placeholders%}Tensor* inpu {% else %} void get_{{graph_name}}_ctx(Context& ctx); {% endif %} -#endif // _{{header_guard}} \ No newline at end of file +#endif // _{{header_guard}} diff --git a/utensor_cgen/snippets/templates/snippets/qrelu_op.cpp b/utensor_cgen/snippets/templates/snippets/qrelu_op.cpp index 3dcf4b11..4f80df7f 100644 --- a/utensor_cgen/snippets/templates/snippets/qrelu_op.cpp +++ b/utensor_cgen/snippets/templates/snippets/qrelu_op.cpp @@ -2,9 +2,15 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_names[-1]}}; {% endif %} { + {%if ref_counts%} ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + {%else%} + ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}"); + ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[1]}}"); + ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[2]}}"); + {%endif%} ctx.push(new ReluOp<{{in_dtype}}, {{out_dtypes[0]}}, {{qout_dtype}}>(), { {% for tname in inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" }, { {% for tname in outputs[:-1]%}"{{tname}}", {% endfor %}"{{outputs[-1]}}" }); diff --git a/utensor_cgen/snippets/templates/snippets/requant_op.cpp b/utensor_cgen/snippets/templates/snippets/requant_op.cpp index 503ec5d1..4bac1905 100644 --- a/utensor_cgen/snippets/templates/snippets/requant_op.cpp +++ b/utensor_cgen/snippets/templates/snippets/requant_op.cpp @@ -1,10 +1,16 @@ {% if create_sptr %} S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_names[-1]}}; {% endif %} -{ +{ + {%if ref_counts%} ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + {%else%} + ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}"); + ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[1]}}"); + ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[2]}}"); + {%endif%} ctx.push(new RequantizeOp(), { {% for tname in inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" }, { {% for tname in outputs[:-1]%}"{{tname}}", {% endfor %}"{{outputs[-1]}}" }); diff --git a/utensor_cgen/snippets/templates/snippets/requant_range_op.cpp b/utensor_cgen/snippets/templates/snippets/requant_range_op.cpp index d0a1c25e..97e126cf 100644 --- a/utensor_cgen/snippets/templates/snippets/requant_range_op.cpp +++ b/utensor_cgen/snippets/templates/snippets/requant_range_op.cpp @@ -2,8 +2,13 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_names[-1]}}; {% endif %} { + {%if ref_counts%} ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[0]}}", {{ref_counts[0]}}); ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); + {%else%} + ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[0]}}"); + ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[1]}}"); + {%endif%} ctx.push(new Requantization_RangeOp(), { {%for tname in 
inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" }, { {%for tname in outputs[:-1]%}"{{tname}}", {% endfor %}"{{outputs[-1]}}" }); diff --git a/utensor_cgen/snippets/templates/snippets/weight_snippet.hpp b/utensor_cgen/snippets/templates/snippets/weight_snippet.hpp new file mode 100644 index 00000000..2ccf15fc --- /dev/null +++ b/utensor_cgen/snippets/templates/snippets/weight_snippet.hpp @@ -0,0 +1,3 @@ +#include + +const {{ type }} {{ inline_name }} [ {{ length }} ] = { {% for item in value %} {{ item }}, {% endfor %} }; diff --git a/utensor_cgen/transformer/ns_transformer.py b/utensor_cgen/transformer/ns_transformer.py index 2d5046d7..07c277ef 100644 --- a/utensor_cgen/transformer/ns_transformer.py +++ b/utensor_cgen/transformer/ns_transformer.py @@ -4,25 +4,41 @@ Transformers that get rid of namescope/nodes which are not needed for inference """ +import re from collections import defaultdict from copy import deepcopy -import re -from utensor_cgen.ir import uTensorGraph, OperationInfo +from utensor_cgen.ir import OperationInfo, uTensorGraph from utensor_cgen.utils import parse_tensor_name + from .base import Transformer -__all__ = ["DropoutTransformer", "BatchNormTransformer"] +__all__ = ["DropoutTransformer", "BatchNormTransformer", "InlineTransformer"] + +class InlineTransformer(Transformer): + METHOD_NAME = 'inline' + KWARGS_NAMESCOPE = '_utensor_inline' + TARGET_NODENAME_PATTERN = re.compile(r'(const[_\w\d]*)/.*') + + def transform(self, ugraph): + for node_name in ugraph.topo_order: + op_type = ugraph.ops_info[node_name].op_type + if op_type == 'Const': + op_info = ugraph.ops_info[node_name] + op_info.op_type = 'Inline' + + return ugraph class DropoutTransformer(Transformer): """Remove Dropout Op """ METHOD_NAME = 'dropout' - KWARGS_NAMESCOPE = '_dropout' + KWARGS_NAMESCOPE = '_utensor_dropout' TARGET_NODENAME_PATTERN = re.compile(r'(dropout[_\w\d]*)/.*') def transform(self, ugraph): + new_graph = uTensorGraph() dropout_input_map = self._find_input(ugraph) new_ops_info = {} new_topo_order = [] @@ -50,10 +66,10 @@ def transform(self, ugraph): output_tensors=out_t_infos, op_type=op_info.op_type, backend=op_info.backend, - op_attr=op_attr) + op_attr=op_attr, + ugraph=new_graph) new_ops_info[node_name] = new_op_info new_topo_order.append(node_name) - new_graph = uTensorGraph() new_graph.ops_info = new_ops_info new_graph.topo_order = new_topo_order new_graph.output_nodes = deepcopy(ugraph.output_nodes) diff --git a/utensor_cgen/transformer/optimizer.py b/utensor_cgen/transformer/optimizer.py index 322f66bc..5714bb89 100644 --- a/utensor_cgen/transformer/optimizer.py +++ b/utensor_cgen/transformer/optimizer.py @@ -11,7 +11,7 @@ class RefCntOptimizer(Transformer): METHOD_NAME = 'refcnt' - KWARGS_NAMESCOPE = '_refcnt' + KWARGS_NAMESCOPE = '_utensor_refcnt' def __init__(self, **kwargs): self.prune_graph = False diff --git a/utensor_cgen/transformer/pipline.py b/utensor_cgen/transformer/pipline.py index 5b2ed733..b26c720c 100644 --- a/utensor_cgen/transformer/pipline.py +++ b/utensor_cgen/transformer/pipline.py @@ -1,5 +1,6 @@ from .optimizer import RefCntOptimizer from .ns_transformer import DropoutTransformer, BatchNormTransformer +from .ns_transformer import InlineTransformer from .quantize import QuantizeTransformer from .base import Transformer from utensor_cgen.utils import NamescopedKWArgsParser @@ -10,7 +11,8 @@ class TransformerPipeline(object): RefCntOptimizer.METHOD_NAME: RefCntOptimizer, DropoutTransformer.METHOD_NAME: DropoutTransformer, BatchNormTransformer.METHOD_NAME: 
BatchNormTransformer, - QuantizeTransformer.METHOD_NAME: QuantizeTransformer + QuantizeTransformer.METHOD_NAME: QuantizeTransformer, + InlineTransformer.METHOD_NAME: InlineTransformer } def __init__(self, methods, kwargs): diff --git a/utensor_cgen/utils.py b/utensor_cgen/utils.py index c7322057..7c0173ab 100644 --- a/utensor_cgen/utils.py +++ b/utensor_cgen/utils.py @@ -3,6 +3,7 @@ import re from copy import deepcopy +from click.types import ParamType import numpy as np import idx2numpy as idx2np import tensorflow as tf @@ -151,3 +152,23 @@ def __getitem__(self, argname): return self._private_kwargs[argname] except KeyError: return self._shared_kwargs[argname] + +class NArgsParam(ParamType): + + def __init__(self, sep=','): + self._sep = sep + + def convert(self, value, param, ctx): + value = str(value) + args = value.split(self._sep) + aug_args = [arg for arg in args if arg[0] in ['+', '-']] + if aug_args: + final_args = param.default.split(self._sep) + for arg in aug_args: + if arg[0] == '+': + final_args.append(arg[1:]) + elif arg[0] == '-' and arg[1:] in final_args: + final_args.remove(arg[1:]) + else: + final_args = args + return final_args
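
Note (illustration only, not part of the patch): the NArgsParam click type added in utensor_cgen/utils.py takes a comma-separated value and either replaces the option's default outright or adjusts it item by item, appending entries prefixed with '+' and dropping entries prefixed with '-'. Below is a minimal usage sketch assuming this patched utensor_cgen is installed; the demo command and its option are hypothetical, only NArgsParam comes from the diff, and the expected output follows the convert logic shown above.

# demo.py -- hypothetical example, not part of the patch
import click

from utensor_cgen.utils import NArgsParam


@click.command()
@click.option('--transform-methods',
              type=NArgsParam(),
              default='dropout,quantize,refcnt,inline',
              show_default=True)
def demo(transform_methods):
    # A plain value such as 'dropout,refcnt' replaces the default list;
    # '+name' appends to the default list and '-name' removes from it.
    click.echo(transform_methods)


if __name__ == '__main__':
    # e.g. `python demo.py --transform-methods=+mytrans,-quantize`
    # should print: ['dropout', 'refcnt', 'inline', 'mytrans']
    demo()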