{"versions":{"0.0.1":{"name":"@tensorflow-models/body-segmentation","version":"0.0.1","description":"Pretrained body segmentation model","main":"dist/index.js","jsnext:main":"dist/body-segmentation.esm.js","module":"dist/body-segmentation.esm.js","unpkg":"dist/body-segmentation.min.js","jsdelivr":"dist/body-segmentation.min.js","types":"dist/index.d.ts","repository":{"type":"git","url":"git+https://github.com/tensorflow/tfjs-models.git"},"peerDependencies":{"@mediapipe/selfie_segmentation":"~0.1.0","@tensorflow/tfjs-backend-webgl":"^3.13.0","@tensorflow/tfjs-converter":"^3.13.0","@tensorflow/tfjs-core":"^3.13.0"},"devDependencies":{"@babel/polyfill":"^7.10.4","@mediapipe/selfie_segmentation":"^0.1.0","@rollup/plugin-commonjs":"^11.0.2","@rollup/plugin-node-resolve":"^7.1.1","@rollup/plugin-typescript":"^3.0.0","@tensorflow/tfjs-backend-cpu":"^3.13.0","@tensorflow/tfjs-backend-webgl":"^3.13.0","@tensorflow/tfjs-converter":"^3.13.0","@tensorflow/tfjs-core":"^3.13.0","@types/jasmine":"~3.4.0","babel-core":"~6.26.0","babel-plugin-transform-runtime":"~6.23.0","jasmine":"~3.1.0","jasmine-core":"~3.1.0","karma":"~6.3.1","karma-browserstack-launcher":"~1.6.0","karma-chrome-launcher":"~2.2.0","karma-jasmine":"~1.1.0","karma-spec-reporter":"~0.0.32","karma-typescript":"~5.5.1","karma-typescript-es6-transform":"^5.1.0","rollup":"~2.3.2","rollup-plugin-terser":"~7.0.2","rollup-plugin-visualizer":"~3.3.2","ts-node":"~8.8.2","tslint":"~6.1.3","tslint-no-circular-imports":"~0.7.0","typescript":"~3.9.9","yalc":"~1.0.0-pre.21"},"scripts":{"bundle":"rollup -c","build":"rimraf dist && tsc && yarn bundle","publish-local":"yarn build && rollup -c && yalc publish","run-browserstack":"karma start --browserstack","test-node":"NODE_PRESERVE_SYMLINKS=1 ts-node --skip-ignore --project tsconfig.test.json run_tests.ts","test":"yarn && karma start","test-ci":"yarn run-browserstack --browsers=bs_chrome_mac","build-npm":"yarn build && yarn bundle","lint":"tslint -p . 
-t verbose"},"license":"Apache-2.0","dependencies":{"rimraf":"^3.0.2"},"bugs":{"url":"https://github.com/tensorflow/tfjs-models/issues"},"homepage":"https://github.com/tensorflow/tfjs-models#readme","_id":"@tensorflow-models/body-segmentation@0.0.1","_nodeVersion":"15.14.0","_npmVersion":"7.7.6","dist":{"integrity":"sha512-fNYN8+VsW0mR8KYlzKt/FGqE5UzqLB4+BtpK+cKt6fJ4utwF+8CA1TJruAchNKejqzFm6m4XMx05v1DdzeMZlQ==","shasum":"21aa9f2cda89bd74c1d6a88639568bd25fb8d862","tarball":"http://123.232.10.234:8212/nexus/content/groups/npm-public/@tensorflow-models/body-segmentation/-/body-segmentation-0.0.1.tgz","fileCount":328,"unpackedSize":1366973,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.13\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJh8wtaCRA9TVsSAnZWagAAumAQAIHc7oRc2nGzyjXlpPtw\nhDIf5+WkC7OtVFP0EwJwtdGheQgawsJcuajZZrRAvvVcTk9FNmJ4oZmoVZuD\nU+iIx6LXmEaxwTQuoDczb43I7LuvPJ2ofxD8Cy6xxMktQQn49tTyUlj0crG0\nc3WuIAd3UEwu1Q5MZIARCEoxUBhbcudNuGkhVXeAyakatAwgMmv0DPpE2Mq8\nFj45UEqoLtE7Y6Y3JihFtfEiUWJRIk9366eShZpi5koYGxv6h/PRmnnuSlC3\naIpnMWc/cHuiAb6AwUyNnBPSlZ6tKLzRZUDHW/Ers1ZTOpwLvMRXeL7jVHFU\norkO15wGx5jszz3T1/SGFohAPsfVNNV3Bkx0aCHXPfl9YdLnMGwKRVJjZmoG\nQoD3B/fQt9Dxhb4HiRd77lZbkuNWxgw04XtT1x/+aTvpaAtp5hFhAxKGzCoD\nq5C/P4cdNSg0E8aI9i/JZM/SfVkDA7Q7qy48La59reXGaMyOfNGa4x1mMkdZ\nOmTM+ROjS86MNuYUp55/TjBuEwjrY4YkExl1gGYLSqbagBosBd8y0MkMie05\nkGDy8us+taBSFTI0CvR+xO2tgi0P3UXyscZCcluPP350HHqpIKGj393wB2nI\nQNnn8aRcE0URDTT+1lkiQTmY+cbmp2K3AxpAQofgQV0ZTv2wzT48GOFMjPCc\n/dk5\r\n=10/I\r\n-----END PGP 
SIGNATURE-----\r\n","signatures":[{"keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA","sig":"MEUCIQCV3JR7+5q6nLrJmwOx3TZvEpz/dJEME7HEccvjJ0XWYwIgMhmm9YpYLDYoP92lNw1kicUiDQJJ9VQg9vUNHsS92wg="}]},"_npmUser":{"name":"anonymous","email":"ahmed13579@gmail.com"},"directories":{},"maintainers":[{"name":"anonymous","email":"ahmed13579@gmail.com"},{"name":"anonymous","email":"nsthorat@google.com"},{"name":"anonymous","email":"dsmilkov@gmail.com"},{"name":"anonymous","email":"cais@google.com"},{"name":"anonymous","email":"ping.yu.11@gmail.com"},{"name":"anonymous","email":"annyuan@gmail.com"},{"name":"anonymous","email":"yassogba@gmail.com"},{"name":"anonymous","email":"matthew@soulanille.net"},{"name":"anonymous","email":"linazhao128@gmail.com"},{"name":"anonymous","email":"jinjingforever@gmail.com"}],"_npmOperationalInternal":{"host":"s3://npm-registry-packages","tmp":"tmp/body-segmentation_0.0.1_1643318106242_0.07983082712327483"},"_hasShrinkwrap":false,"contributors":[]},"1.0.0":{"name":"@tensorflow-models/body-segmentation","version":"1.0.0","description":"Pretrained body segmentation 
model","main":"dist/index.js","jsnext:main":"dist/body-segmentation.esm.js","module":"dist/body-segmentation.esm.js","unpkg":"dist/body-segmentation.min.js","jsdelivr":"dist/body-segmentation.min.js","types":"dist/index.d.ts","repository":{"type":"git","url":"git+https://github.com/tensorflow/tfjs-models.git"},"peerDependencies":{"@mediapipe/selfie_segmentation":"~0.1.0","@tensorflow/tfjs-backend-webgl":"^3.13.0","@tensorflow/tfjs-converter":"^3.13.0","@tensorflow/tfjs-core":"^3.13.0"},"devDependencies":{"@babel/polyfill":"^7.10.4","@mediapipe/selfie_segmentation":"^0.1.0","@rollup/plugin-commonjs":"^11.0.2","@rollup/plugin-node-resolve":"^7.1.1","@rollup/plugin-typescript":"^3.0.0","@tensorflow/tfjs-backend-cpu":"^3.13.0","@tensorflow/tfjs-backend-webgl":"^3.13.0","@tensorflow/tfjs-converter":"^3.13.0","@tensorflow/tfjs-core":"^3.13.0","@types/jasmine":"~3.4.0","babel-core":"~6.26.0","babel-plugin-transform-runtime":"~6.23.0","jasmine":"~3.1.0","jasmine-core":"~3.1.0","karma":"~6.3.1","karma-browserstack-launcher":"~1.6.0","karma-chrome-launcher":"~2.2.0","karma-jasmine":"~1.1.0","karma-spec-reporter":"~0.0.32","karma-typescript":"~5.5.1","karma-typescript-es6-transform":"^5.1.0","rollup":"~2.3.2","rollup-plugin-terser":"~7.0.2","rollup-plugin-visualizer":"~3.3.2","ts-node":"~8.8.2","tslint":"~6.1.3","tslint-no-circular-imports":"~0.7.0","typescript":"~3.9.9","yalc":"~1.0.0-pre.21"},"scripts":{"bundle":"rollup -c","build":"rimraf dist && tsc && yarn bundle","publish-local":"yarn build && rollup -c && yalc publish","run-browserstack":"karma start --browserstack","test-node":"NODE_PRESERVE_SYMLINKS=1 ts-node --skip-ignore --project tsconfig.test.json run_tests.ts","test":"yarn && karma start","test-ci":"yarn run-browserstack --browsers=bs_chrome_mac","build-npm":"yarn build && yarn bundle","lint":"tslint -p . 
-t verbose"},"license":"Apache-2.0","dependencies":{"rimraf":"^3.0.2"},"bugs":{"url":"https://github.com/tensorflow/tfjs-models/issues"},"homepage":"https://github.com/tensorflow/tfjs-models#readme","_id":"@tensorflow-models/body-segmentation@1.0.0","_nodeVersion":"15.14.0","_npmVersion":"7.7.6","dist":{"integrity":"sha512-LMXulkLNv7DNict/Tlg/TW1ACVx/fgnxdnuZG6VQ0tcDBULqfe1kmVpXQGLA19FHJrdRqggGt/jL3i356e83eQ==","shasum":"3f2441e142c596669f93d22adc16def3d12486ff","tarball":"http://123.232.10.234:8212/nexus/content/groups/npm-public/@tensorflow-models/body-segmentation/-/body-segmentation-1.0.0.tgz","fileCount":334,"unpackedSize":1383648,"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v4.10.10\r\nComment: https://openpgpjs.org\r\n\r\nwsFzBAEBCAAGBQJiH+OSACEJED1NWxICdlZqFiEECWMYAoorWMhJKdjhPU1b\r\nEgJ2Vmq+hBAAobWR8tRtkZzO1iohGzwbu0Z8qdcetbG32V9/CpteydxqgwVj\r\ntJL2YcYXSP9+Gi1s++tu1ls1vr2PckPWHwH4v07FuktIbStpUEgNsM5bEadv\r\nArwq4SWDJpW4w1lL2/azKyOEF4YDaIz5RuGT1QWpf/G4kBRdnMNDwR7eEphC\r\noGpxt8d1s+5moUHDyQz60UhI1gwhwqsqfmI0iv9qDqRwJkEYSIOqDddNoeV0\r\nLSn1hB9ApJjASw69wdUS2PeEPSpYUKCyXzCDmptqCSvnWUEpsUXx4AOA3xXR\r\nCkrGYdNGBBwZqB9rx3TmKVepX4iA5+vu44TWV7PVnRglrq1dqDtedJx1Oo5T\r\nMnDzWwU7C0G6JxBBXOsm9zrAD8opqlQvDcKbjzAb+2kU6KZoStgqESHhXcsQ\r\ni5kah5J9IS0jfnNJjEmMmGZiDvTb5UY4xp2jCDJcTu5IbE8oKeB9sY9OmH6a\r\nZrwcwB37ToTjTK0kvSXXsYThQcYSIAvUUMGRVE8KlFhukVY4brkzj4q886FC\r\npDjSaiW+EdHtl2YTLOYR6kkf4HewW2d53jlRGHZlfqGKTMgq5hc2ikMpg1YV\r\n9GhIo6hsS8bVyjcGEdNL4FXIDuCD91xKzuitL159gavwcuXi0sxgo53/ItQL\r\n1yLgJLO7mBc3XwFtZ3SqEO8UKeA/mq0Ny44=\r\n=zQq2\r\n-----END PGP 
SIGNATURE-----\r\n","signatures":[{"keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA","sig":"MEUCIGT83ITCIh4CpdtQvyOpYh4jmJoYx8rKPxyhFocPhuegAiEAoO8klcrWLfAq7x6GDrEVBiNNP8fZsR0yAg+rT8tWENI="}]},"_npmUser":{"name":"anonymous","email":"ahmed13579@gmail.com"},"directories":{},"maintainers":[{"name":"anonymous","email":"ahmed13579@gmail.com"},{"name":"anonymous","email":"nsthorat@google.com"},{"name":"anonymous","email":"dsmilkov@gmail.com"},{"name":"anonymous","email":"cais@google.com"},{"name":"anonymous","email":"ping.yu.11@gmail.com"},{"name":"anonymous","email":"annyuan@gmail.com"},{"name":"anonymous","email":"yassogba@gmail.com"},{"name":"anonymous","email":"matthew@soulanille.net"},{"name":"anonymous","email":"linazhao128@gmail.com"},{"name":"anonymous","email":"jinjingforever@gmail.com"}],"_npmOperationalInternal":{"host":"s3://npm-registry-packages","tmp":"tmp/body-segmentation_1.0.0_1646257042622_0.281487179108735"},"_hasShrinkwrap":false,"contributors":[]},"1.0.1":{"name":"@tensorflow-models/body-segmentation","version":"1.0.1","description":"Pretrained body segmentation 
model","main":"dist/index.js","jsnext:main":"dist/body-segmentation.esm.js","module":"dist/body-segmentation.esm.js","unpkg":"dist/body-segmentation.min.js","jsdelivr":"dist/body-segmentation.min.js","types":"dist/index.d.ts","repository":{"type":"git","url":"git+https://github.com/tensorflow/tfjs-models.git"},"peerDependencies":{"@mediapipe/selfie_segmentation":"~0.1.0","@tensorflow/tfjs-backend-webgl":"^3.13.0","@tensorflow/tfjs-converter":"^3.13.0","@tensorflow/tfjs-core":"^3.13.0"},"devDependencies":{"@babel/polyfill":"^7.10.4","@mediapipe/selfie_segmentation":"^0.1.0","@rollup/plugin-commonjs":"^11.0.2","@rollup/plugin-node-resolve":"^7.1.1","@rollup/plugin-typescript":"^3.0.0","@tensorflow/tfjs-backend-cpu":"^3.13.0","@tensorflow/tfjs-backend-webgl":"^3.13.0","@tensorflow/tfjs-converter":"^3.13.0","@tensorflow/tfjs-core":"^3.13.0","@types/jasmine":"~3.4.0","babel-core":"~6.26.0","babel-plugin-transform-runtime":"~6.23.0","jasmine":"~3.1.0","jasmine-core":"~3.1.0","karma":"~6.3.1","karma-browserstack-launcher":"~1.6.0","karma-chrome-launcher":"~2.2.0","karma-jasmine":"~1.1.0","karma-spec-reporter":"~0.0.32","karma-typescript":"~5.5.1","karma-typescript-es6-transform":"^5.1.0","rollup":"~2.3.2","rollup-plugin-terser":"~7.0.2","rollup-plugin-visualizer":"~3.3.2","ts-node":"~8.8.2","tslint":"~6.1.3","tslint-no-circular-imports":"~0.7.0","typescript":"~3.9.9","yalc":"~1.0.0-pre.21"},"scripts":{"bundle":"rollup -c","build":"rimraf dist && tsc && yarn bundle","publish-local":"yarn build && rollup -c && yalc publish","run-browserstack":"karma start --browserstack","test-node":"NODE_PRESERVE_SYMLINKS=1 ts-node --skip-ignore --project tsconfig.test.json run_tests.ts","test":"yarn && karma start","test-ci":"yarn run-browserstack --browsers=bs_chrome_mac","build-npm":"yarn build && yarn bundle","lint":"tslint -p . 
-t verbose"},"license":"Apache-2.0","dependencies":{"rimraf":"^3.0.2"},"bugs":{"url":"https://github.com/tensorflow/tfjs-models/issues"},"homepage":"https://github.com/tensorflow/tfjs-models#readme","_id":"@tensorflow-models/body-segmentation@1.0.1","_nodeVersion":"15.14.0","_npmVersion":"7.7.6","dist":{"integrity":"sha512-IPQ+a848KDZ/yzxJwP+GDPGwa6RLJ3+VbFraxlSWxAfygqTKx0EQQWHUawbCKxILtlzcfeinW+Gdcyiq0Onkfw==","shasum":"333429a8202252185f0cc6ffba726b82f71943eb","tarball":"http://123.232.10.234:8212/nexus/content/groups/npm-public/@tensorflow-models/body-segmentation/-/body-segmentation-1.0.1.tgz","fileCount":334,"unpackedSize":1389410,"signatures":[{"keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA","sig":"MEQCIAWoo3DtAPiTzqGKfoj6y15MhuVCu9N2j5+Fds4G9ouiAiATizlBmAE0Rc1REm3ujykx90esDXO78YGWTap2tgosFg=="}],"npm-signature":"-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v4.10.10\r\nComment: https://openpgpjs.org\r\n\r\nwsFzBAEBCAAGBQJieVa1ACEJED1NWxICdlZqFiEECWMYAoorWMhJKdjhPU1b\r\nEgJ2VmoAqBAAoBJSzwCeoFw71Chv73EGWGxk1uCv8X2tkNI59UEN+AWIyOUM\r\nNCVPI0ZG41naGByaVw4JC2llWSuNdqvIjr9UrSpay2OlmSBPa2s4x75VZLdh\r\nRLye1nPp2bsI5E4nv07yza25ih4HL9bGq6XB8YuboUgF3ZsearNQA5KB/9ID\r\nRFXV/Iiobatf+bNW5KdsWL9IB2blYnoc/avvT9aX5K3y9r6VUkZd7hExFmG+\r\n5o+lUZXqCTH5uujVtVex7RP9dXmv1B1dNyn+G54X51x+RvQU0WblGgil7TlG\r\nHjp1GxG7mUaKWn2B2tKiwXb1tAwk660zdwldqUZRw5jmYHHMVMa6UUM7rdwq\r\nVXoLW/fNTEpWGHbExEjwoBiN+F9WggEkQx4FWE6AC99nzQvpGPncMwWdQwTZ\r\n/7Up9uIzTenX8qhs1laJvxGt/mTBJybCFk+gUAqdtsg772i6vmgrd3O7RaTX\r\nEey2Jse6BscbBd50B5BDC8YqpHpDi8/9FEb51Nwx8BNxsBANYkA+oLuqlbzr\r\nOG3UGIb+6cL26YhOKzSmYTsvDL09V8i0p+PEMFkTycuGKzVPwBKx+iOLbYb8\r\nA1o72vzzFm/2kRK8HUHA+Rt0J0E6hkdGEZDjLsvFi4HKtnmMplw0ksytoXh+\r\n9Me8EBFScKU+S2MsAzIujRnnp/PhjTKHz6A=\r\n=eVct\r\n-----END PGP 
SIGNATURE-----\r\n"},"_npmUser":{"name":"anonymous","email":"ahmed13579@gmail.com"},"directories":{},"maintainers":[{"name":"anonymous","email":"ahmed13579@gmail.com"},{"name":"anonymous","email":"nsthorat@google.com"},{"name":"anonymous","email":"dsmilkov@gmail.com"},{"name":"anonymous","email":"cais@google.com"},{"name":"anonymous","email":"ping.yu.11@gmail.com"},{"name":"anonymous","email":"annyuan@gmail.com"},{"name":"anonymous","email":"yassogba@gmail.com"},{"name":"anonymous","email":"matthew@soulanille.net"},{"name":"anonymous","email":"linazhao128@gmail.com"},{"name":"anonymous","email":"jinjingforever@gmail.com"}],"_npmOperationalInternal":{"host":"s3://npm-registry-packages","tmp":"tmp/body-segmentation_1.0.1_1652119221740_0.6863991313566837"},"_hasShrinkwrap":false,"contributors":[]},"1.0.2":{"name":"@tensorflow-models/body-segmentation","version":"1.0.2","description":"Pretrained body segmentation model","main":"dist/index.js","jsnext:main":"dist/body-segmentation.esm.js","module":"dist/body-segmentation.esm.js","unpkg":"dist/body-segmentation.min.js","jsdelivr":"dist/body-segmentation.min.js","types":"dist/index.d.ts","repository":{"type":"git","url":"git+https://github.com/tensorflow/tfjs-models.git"},"peerDependencies":{"@mediapipe/selfie_segmentation":"~0.1.0","@tensorflow/tfjs-backend-webgl":"^4.9.0","@tensorflow/tfjs-converter":"^4.9.0","@tensorflow/tfjs-core":"^4.9.0"},"devDependencies":{"@babel/polyfill":"^7.10.4","@mediapipe/selfie_segmentation":"^0.1.0","@rollup/plugin-commonjs":"^11.0.2","@rollup/plugin-node-resolve":"^7.1.1","@rollup/plugin-typescript":"^3.0.0","@tensorflow/tfjs-backend-cpu":"^4.9.0","@tensorflow/tfjs-backend-webgl":"^4.9.0","@tensorflow/tfjs-converter":"^4.9.0","@tensorflow/tfjs-backend-webgpu":"4.9.0","@tensorflow/tfjs-core":"^4.9.0","@types/jasmine":"~3.4.0","babel-core":"~6.26.0","babel-plugin-transform-runtime":"~6.23.0","jasmine":"~3.1.0","jasmine-core":"~3.1.0","karma":"~6.3.1","karma-browserstack-launcher":"~1.6.0","k
arma-chrome-launcher":"~2.2.0","karma-jasmine":"~1.1.0","karma-spec-reporter":"~0.0.32","karma-typescript":"~5.5.1","karma-typescript-es6-transform":"^5.1.0","rollup":"~2.3.2","rollup-plugin-terser":"~7.0.2","rollup-plugin-visualizer":"~3.3.2","ts-node":"~8.8.2","tslint":"~6.1.3","tslint-no-circular-imports":"~0.7.0","typescript":"~4.4.0","yalc":"~1.0.0-pre.21"},"scripts":{"bundle":"rollup -c","build":"rimraf dist && tsc && yarn bundle","publish-local":"yarn build && rollup -c && yalc publish","run-browserstack":"karma start --browserstack","test-node":"NODE_PRESERVE_SYMLINKS=1 ts-node --skip-ignore --project tsconfig.test.json run_tests.ts","test":"yarn && karma start","test-ci":"yarn run-browserstack --browsers=bs_chrome_mac","build-npm":"yarn build && yarn bundle","lint":"tslint -p . -t verbose"},"license":"Apache-2.0","dependencies":{"rimraf":"^3.0.2"},"_id":"@tensorflow-models/body-segmentation@1.0.2","gitHead":"dd5f6ac96e71b7994647534975cbfb614097b8f3","bugs":{"url":"https://github.com/tensorflow/tfjs-models/issues"},"homepage":"https://github.com/tensorflow/tfjs-models#readme","_nodeVersion":"20.5.0","_npmVersion":"9.8.0","dist":{"integrity":"sha512-sbPiL8wpqfKqh01of6qguZzU9yLWOQDTwiPEIHFPA/EAjz5T51LKHKIySll+mmteRo/TxNED8pxd9VJ6q2r7kg==","shasum":"3892f87b05e29ead61f66eaefdda113cc19a31b0","tarball":"http://123.232.10.234:8212/nexus/content/groups/npm-public/@tensorflow-models/body-segmentation/-/body-segmentation-1.0.2.tgz","fileCount":334,"unpackedSize":1394966,"signatures":[{"keyid":"SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA","sig":"MEYCIQDakEpQE7feagFkGk19KiPLeF9IVBCZIGP2OD8ZhoYUBwIhAIKfAzE5NG533UaT7l43/S0osvMFKaDuPJTgd/orDCkt"}],"size":241558},"_npmUser":{"name":"anonymous","email":"yaofengwu123@gmail.com"},"directories":{},"maintainers":[{"name":"anonymous","email":"Dedongala@google.com"},{"name":"anonymous","email":"yaofengwu123@gmail.com"},{"name":"anonymous","email":"linchan@google.com"},{"name":"anonymous","email":"cais@google.com"},{"name":
"anonymous","email":"ping.yu.11@gmail.com"},{"name":"anonymous","email":"annyuan@gmail.com"},{"name":"anonymous","email":"matthew@soulanille.net"},{"name":"anonymous","email":"linazhao128@gmail.com"},{"name":"anonymous","email":"jinjingforever@gmail.com"}],"_npmOperationalInternal":{"host":"s3://npm-registry-packages","tmp":"tmp/body-segmentation_1.0.2_1690833688298_0.5114802329797927"},"_hasShrinkwrap":false,"_cnpmcore_publish_time":"2023-07-31T20:01:28.526Z","publish_time":1690833688526,"_source_registry_name":"default","contributors":[]}},"dist-tags":{"latest":"1.0.2"},"name":"@tensorflow-models/body-segmentation","time":{"created":"2022-01-27T22:35:31.260Z","0.0.1":"2022-01-27T21:15:06.784Z","modified":"2025-05-08T01:59:27.080Z","1.0.0":"2022-03-02T21:37:22.882Z","1.0.1":"2022-05-09T18:00:21.970Z","1.0.2":"2023-07-31T20:01:28.526Z"},"readme":"# Body Segmentation\n\nThis package provides models for running real-time body segmentation.\n\nCurrently, we provide 2 model options:\n\n#### MediaPipe SelfieSegmentation:\n\nMediaPipe SelfieSegmentation segments the prominent humans in the scene. It can run in real-time on both smartphones and laptops. The intended use cases include selfie effects and video conferencing, where the person is close (< 2m) to the camera.\n\n#### BodyPix:\n\nBodyPix can be used to segment an image into pixels that are and are not part of a person, and into pixels that belong to each of twenty-four body parts. It works for multiple people in an input image or video.\n\n-------------------------------------------------------------------------------\n## Table of Contents\n1. [How to Run It](#how-to-run-it)\n2. [Example Code and Demos](#example-code-and-demos)\n3. 
[Output Visualization Utility Functions](#output-visualization-utility-functions)\n\n-------------------------------------------------------------------------------\n## How to Run It\nIn general there are two steps:\n\nYou first create a detector by choosing one of the models from `SupportedModels`,\nincluding `MediaPipeSelfieSegmentation` and `BodyPix`.\n\nFor example:\n\n```javascript\nconst model = bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation;\nconst segmenterConfig = {\n  runtime: 'mediapipe', // or 'tfjs'\n  solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation',\n  modelType: 'general'\n}\nconst segmenter = await bodySegmentation.createSegmenter(model, segmenterConfig);\n```\n\nThen you can use the segmenter to segment people in the image.\n\n```javascript\nconst people = await segmenter.segmentPeople(image);\n```\n\nThe returned segmentation list contains the detected people in the image.\nNote that it is not necessarily the case that there will be one segmentation per\none person. Each model will have its own semantics for the segmentation output.\n\nMediaPipe SelfieSegmentation returns exactly one segmentation corresponding to all people in the input image.\n\nBodyPix returns exactly one segmentation corresponding to all people in the input image if `multiSegmentation` option is false, and otherwise will return multiple segmentations, one per person.\n\nExample output:\n```\n[\n  {\n    maskValueToLabel: (maskValue: number) => { return 'person' },\n    mask: {\n      toCanvasImageSource(): ...\n      toImageData(): ...\n      toTensor(): ...\n      getUnderlyingType(): ...\n    }\n  }\n]\n```\n\nThe `mask` key stores an object which provides access to the underlying mask image using the conversion functions toCanvasImageSource, toImageData, and toTensor depending on the desired output type. 
Note that getUnderlyingType can be queried to determine what is the type being used underneath the hood to avoid expensive conversions (such as from tensor to image data).\n\nThe semantics of the RGBA values of the `mask` is as follows: the image mask is the same size as the input image, where green and blue channels are always set to 0. Different red values denote different body parts (see maskValueToLabel key below). Different alpha values denote the probability of pixel being a body part pixel (0 being lowest probability and 255 being highest).\n\n`maskValueToLabel` maps a foreground pixel’s red value to the segmented part name of that pixel. Should throw error for unsupported input values. This is not necessarily the same across different models (for example MediaPipeSelfieSegmentation will always return 'person' since it does not distinguish individual body parts).\n\nRefer to each model's documentation for specific configurations for the model\nand their performance.\n\n[MediaPipeSelfieSegmentation MediaPipe Documentation](https://github.com/tensorflow/tfjs-models/tree/master/body-segmentation/src/selfie_segmentation_mediapipe)\n\n[MediaPipeSelfieSegmentation TFJS Documentation](https://github.com/tensorflow/tfjs-models/tree/master/body-segmentation/src/selfie_segmentation_tfjs)\n\n[BodyPix Documentation](https://github.com/tensorflow/tfjs-models/tree/master/body-segmentation/src/body_pix)\n\n-------------------------------------------------------------------------------\n\n## Example Code and Demos\nYou may reference the demos for code examples. Details for how to run the demos\nare included in the `demos/`\n[folder](https://github.com/tensorflow/tfjs-models/tree/master/body-segmentation/demos).\n\n## Output Visualization Utility Functions\n\nBody Segmentation provides utility functions to help with drawing and compositing using the outputs. 
These utility functions are based on the ones provided by the deprecated [BodyPix Package](https://github.com/tensorflow/tfjs-models/tree/master/body-pix#output-visualization-utility-functions).\n\n### bodySegmentation.toBinaryMask\n\nGiven a segmentation or array of segmentations, generates an image with foreground and background color at each pixel determined by the corresponding binary segmentation value at the pixel from the output.  In other words, pixels where there is a person will be colored with foreground color and where there is not a person will be colored with background color. This can be used as a mask to crop a person or the background when compositing.\n\n#### Inputs\n\n* **segmentation** Single segmentation or array of segmentations, such as the output from [segmentPeople](#how-to-run-it).\n\n* **foreground** The foreground color (r,g,b,a) for visualizing pixels that belong to people.\n\n* **background** The background color (r,g,b,a) for visualizing pixels that don't belong to people.\n\n* **drawContour** Whether to draw the contour around each person's segmentation mask.\n\n* **foregroundThresholdProbability** The minimum probability to color a pixel as foreground rather than background.\n\n* **foregroundMaskValues** The red channel integer values that represent foreground (for more information refer to the `mask` [output documentation](#how-to-run-it)).\n\n#### Returns\n\nAn [ImageData](https://developer.mozilla.org/en-US/docs/Web/API/ImageData) with the same and width height of the input segmentations, with color and opacity at each pixel determined by the corresponding binary segmentation value at the pixel from the output.\n\n![MaskImageData](./images/toBinaryMask.jpg)\n\n*Given the input shown in the first image above, if it is run through the API's `segmentPeople` method followed by `toBinaryMask`, An [ImageData](https://developer.mozilla.org/en-US/docs/Web/API/ImageData) that either looks like the second image above if setting 
`foregroundColor` to {r: 0, g: 0, b: 0, a: 0} and `backgroundColor` to {r: 0, g: 0, b: 0, a: 255} (by default), or the third image if if setting `foregroundColor` to {r: 0, g: 0, b: 0, a: 255} and `backgroundColor` to {r: 0, g: 0, b: 0, a: 0}.  This can be used to mask either the person or the background using the method `drawMask`.*\n\n#### Example usage\n\n```javascript\nconst img = document.getElementById('image');\n\nconst segmenter = await bodySegmentation.createSegmenter(bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation);\nconst segmentation = await segmenter.segmentPeople(img);\n\n// The mask image is an binary mask image with a 1 where there is a person and\n// a 0 where there is not.\nconst coloredPartImage = await bodySegmentation.toBinaryMask(segmentation);\nconst opacity = 0.7;\nconst flipHorizontal = false;\nconst maskBlurAmount = 0;\nconst canvas = document.getElementById('canvas');\n// Draw the mask image on top of the original image onto a canvas.\n// The colored part image will be drawn semi-transparent, with an opacity of\n// 0.7, allowing for the original image to be visible under.\nbodySegmentation.drawMask(\n    canvas, img, coloredPartImage, opacity, maskBlurAmount,\n    flipHorizontal);\n```\n\n### bodySegmentation.toColoredMask\n\nGiven a segmentation or array of segmentations, and a function mapping the red pixel values (representing body part labels) to colours, generates an image with the corresponding color for each part at each pixel, and background color used where there is no part.\n\n#### Inputs\n\n* **segmentation** Single segmentation or array of segmentations, such as the output from [segmentPeople](#how-to-run-it).\n\n* **maskValueToColor** A function mapping red channel mask values to colors to use in output image (for more information refer to the `mask` [output documentation](#how-to-run-it)). 
If using bodyPix with `segmentBodyParts` on, then `bodySegmentation.bodyPixMaskValueToRainbowColor` can be used as a default.\n\n* **background** The background color (r,g,b,a) for visualizing pixels that don't belong to people.\n\n* **foregroundThresholdProbability** The minimum probability to color a pixel as foreground rather than background.\n\n#### Returns\n\nAn [ImageData](https://developer.mozilla.org/en-US/docs/Web/API/ImageData) with the same width and height of the input segmentations, with the corresponding color for each part at each pixel, and the background color where there is no part.\n\n#### Example usage\n\n```javascript\nconst img = document.getElementById('image');\n\nconst segmenter = await bodySegmentation.createSegmenter(bodySegmentation.SupportedModels.BodyPix);\nconst segmentation = await segmenter.segmentPeople(img, {multiSegmentation: false, segmentBodyParts: true});\n\n// The colored part image is an rgb image with a corresponding color from the\n// rainbow colors for each part at each pixel, and black pixels where there is\n// no part.\nconst coloredPartImage = await bodySegmentation.toColoredMask(segmentation, bodySegmentation.bodyPixMaskValueToRainbowColor, {r: 255, g: 255, b: 255, a: 255}));\nconst opacity = 0.7;\nconst flipHorizontal = false;\nconst maskBlurAmount = 0;\nconst canvas = document.getElementById('canvas');\n// Draw the colored part image on top of the original image onto a canvas.\n// The colored part image will be drawn semi-transparent, with an opacity of\n// 0.7, allowing for the original image to be visible under.\nbodySegmentation.drawMask(\n    canvas, img, coloredPartImage, opacity, maskBlurAmount,\n    flipHorizontal);\n```\n\n![toColoredMask](./images/toColoredMask.jpg)\n\n*Given the input shown in the first image above, if it is run through the api using the BodyPix model and the `segmentPeople` method, followed by `toColoredMask`, then a 'spectral' or 'rainbow' color scale in `toColoredMask` will produce an 
`ImageData` that looks like the second image or the third image above.*\n\n### bodySegmentation.drawMask\n\nDraws an image onto a canvas and draws an `ImageData` containing a mask on top of it with a specified opacity; The `ImageData` is typically generated using `toBinaryMask` or `toColoredMask`.\n\n#### Inputs\n\n* **canvas** The canvas to be drawn onto.\n* **image** The original image to apply the mask to.\n* **maskImage** An ImageData containing the mask.  Ideally this should be generated by `toBinaryMask` or `toColoredMask.`\n* **maskOpacity** The opacity when drawing the mask on top of the image. Defaults to 0.7. Should be a float between 0 and 1.\n* **maskBlurAmount** How many pixels to blur the mask by. Defaults to 0. Should be an integer between 0 and 20.\n* **flipHorizontal** If the result should be flipped horizontally.  Defaults to false.\n\n#### Example usage\n\n```javascript\nconst img = document.getElementById('image');\n\nconst segmenter = await bodySegmentation.createSegmenter(bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation);\nconst segmentation = await segmenter.segmentPeople(img);\n\n// Convert the segmentation into a mask to darken the background.\nconst foregroundColor = {r: 0, g: 0, b: 0, a: 0};\nconst backgroundColor = {r: 0, g: 0, b: 0, a: 255};\nconst backgroundDarkeningMask = await bodySegmentation.toBinaryMask(\n    segmentation, foregroundColor, backgroundColor);\n\nconst opacity = 0.7;\nconst maskBlurAmount = 3;\nconst flipHorizontal = false;\nconst canvas = document.getElementById('canvas');\n// Draw the mask onto the image on a canvas.  
With opacity set to 0.7 and\n// maskBlurAmount set to 3, this will darken the background and blur the\n// darkened background's edge.\nawait bodySegmentation.drawMask(\n    canvas, img, backgroundDarkeningMask, opacity, maskBlurAmount, flipHorizontal);\n```\n\n![drawMask](./images/drawMask.jpg)\n\n*The above shows drawing a mask generated by `toBinaryMask` on top of an image and canvas using `drawMask`.  In this case, `segmentationThreshold` was set to a lower value of 0.25, making the mask include more pixels.  The top two images show the mask drawn on top of the image, and the second two images show the mask blurred by setting  `maskBlurAmount` to 9 before being drawn onto the image, resulting in a smoother transition between the person and the masked background.*\n\n### bodySegmentation.drawPixelatedMask\n\nDraws an image onto a canvas and draws an `ImageData` containing a mask on top of it with a specified opacity; The `ImageData` is typically generated using `toColoredMask`. Different from `drawMask`, this rendering function applies the pixelation effect to the body part segmentation prediction. This allows a user to display low resolution body part segmentation and thus offers an aesthetic interpretation of the body part segmentation prediction.\n\n#### Inputs\n\n* **canvas** The canvas to be drawn onto.\n* **image** The original image to apply the mask to.\n* **maskImage** An ImageData containing the mask.  Ideally this should be generated by `toColoredMask`.\n* **maskOpacity** The opacity when drawing the mask on top of the image. Defaults to 0.7. Should be a float between 0 and 1.\n* **maskBlurAmount** How many pixels to blur the mask by. Defaults to 0. Should be an integer between 0 and 20.\n* **flipHorizontal** If the result should be flipped horizontally.  Defaults to false.\n* **pixelCellWidth** The width of each pixel cell. 
Default to 10 px.\n\n#### Example usage\n\n```javascript\nconst img = document.getElementById('image');\n\nconst segmenter = await bodySegmentation.createSegmenter(bodySegmentation.SupportedModels.BodyPix);\nconst segmentation = await segmenter.segmentPeople(img, {multiSegmentation: false, segmentBodyParts: true});\n\n// The colored part image is an rgb image with a corresponding color from the\n// rainbow colors for each part at each pixel, and white pixels where there is\n// no part.\nconst coloredPartImage = await bodySegmentation.toColoredMask(segmentation, bodySegmentation.bodyPixMaskValueToRainbowColor, {r: 255, g: 255, b: 255, a: 255}));\nconst opacity = 0.7;\nconst flipHorizontal = false;\nconst maskBlurAmount = 0;\nconst pixelCellWidth = 10.0;\nconst canvas = document.getElementById('canvas');\n// Draw the pixelated colored part image on top of the original image onto a\n// canvas.  Each pixel cell's width will be set to 10 px. The pixelated colored\n// part image will be drawn semi-transparent, with an opacity of 0.7, allowing\n// for the original image to be visible under.\nawait bodySegmentation.drawPixelatedMask(\n    canvas, img, coloredPartImage, opacity, maskBlurAmount,\n    flipHorizontal, pixelCellWidth);\n```\n\n![drawPixelatedMask](./images/drawPixelatedMask.png)\n\n*The pixelation effect is applied to part image using `drawPixelatedMask`; the result is shown in the image above.*\n\n### bodySegmentation.drawBokehEffect\n\nGiven a segmentation or array of segmentations, and an image, draws the image with its background blurred onto a canvas.\n\nAn example of applying a [bokeh effect](https://www.nikonusa.com/en/learn-and-explore/a/tips-and-techniques/bokeh-for-beginners.html):\n\n![Bokeh](./images/bokeh.gif)\n\n\n#### Inputs\n\n* **canvas** The canvas to draw the background-blurred image onto.\n* **image** The image to blur the background of and draw.\n* **segmentation** Single segmentation or array of segmentations.\n* **foregroundThreshold** 
The minimum probability to color a pixel as foreground\nrather than background.  Defaults to 0.5. Should be a number between 0 and 1.\n* **backgroundBlurAmount** How many pixels in the background blend into each\nother.  Defaults to 3. Should be an integer between 1 and 20.\n* **edgeBlurAmount** How many pixels to blur on the edge between the person\nand the background by.  Defaults to 3. Should be an integer between 0 and 20.\n* **flipHorizontal** If the output should be flipped horizontally. Defaults to false.\n\n#### Example Usage\n\n```javascript\nconst img = document.getElementById('image');\n\nconst segmenter = await bodySegmentation.createSegmenter(bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation);\nconst segmentation = await segmenter.segmentPeople(img);\n\nconst foregroundThreshold = 0.5;\nconst backgroundBlurAmount = 3;\nconst edgeBlurAmount = 3;\nconst flipHorizontal = false;\n\nconst canvas = document.getElementById('canvas');\n// Draw the image with the background blurred onto the canvas. The edge between\n// the person and blurred background is blurred by 3 pixels.\nawait bodySegmentation.drawBokehEffect(\n    canvas, img, segmentation, foregroundThreshold, backgroundBlurAmount,\n    edgeBlurAmount, flipHorizontal);\n```\n\n![bokeh](./images/bokehimage.png)\n\n*The above shows the process of applying a 'bokeh' effect to an image (the left-most one) with `drawBokehEffect`.  An **inverted** mask is generated from a `Segmentation`.  The original image is then drawn onto the canvas, and using the [canvas compositing](https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/globalCompositeOperation) operation `destination-over` the mask is drawn onto the canvas, causing the background to be removed.  The original image is blurred and drawn onto the canvas where it doesn't overlap with the existing image using the compositing operation `destination-over`.  
The result is seen in the right-most image.*\n\n### bodySegmentation.blurBodyPart\n\nGiven a segmentation or array of segmentations, and an image, blurs some person body parts (e.g. left face and right face).\n\nAn example of applying a body part blur on `left_face` and `right_face` body parts for BodyPix model (other body parts can be specified):\n\n![three_people_faceblur](./images/three_people_faceblur.jpg)\n\n\n#### Inputs\n\n* **canvas** The canvas to draw the body-part blurred image onto.\n* **image** The image with people to blur the body-part and draw.\n* **segmentation** Single segmentation or array of segmentations.\n* **maskValuesToBlur** An array of red channel mask values to blur (representing different body parts, refer to `Segmentation` interface for more details).\n* **foregroundThreshold** The minimum probability to color a pixel as foreground\nrather than background.  Defaults to 0.5. Should be a number between 0 and 1.\n* **backgroundBlurAmount** How many pixels in the background blend into each\nother.  Defaults to 3. Should be an integer between 1 and 20.\n* **edgeBlurAmount** How many pixels to blur on the edge between the person\nand the background by.  Defaults to 3. Should be an integer between 0 and 20.\n* **flipHorizontal** If the output should be flipped horizontally. 
Defaults to false.\n\n#### Example Usage\n\n```javascript\nconst img = document.getElementById('image');\n\nconst segmenter = await bodySegmentation.createSegmenter(bodySegmentation.SupportedModels.BodyPix);\nconst segmentation = await segmenter.segmentPeople(img, {multiSegmentation: false, segmentBodyParts: true});\n\nconst foregroundThreshold = 0.5;\nconst backgroundBlurAmount = 3;\nconst edgeBlurAmount = 3;\nconst flipHorizontal = false;\nconst faceBodyPartIdsToBlur = [0, 1];\n\nconst canvas = document.getElementById('canvas');\n\nawait bodySegmentation.blurBodyPart(\n    canvas, img, segmentation, faceBodyPartIdsToBlur, foregroundThreshold,\n    backgroundBlurAmount, edgeBlurAmount, flipHorizontal);\n```","users":{}}