공태현 / healthcare-with-webcam
Authored by 공태현, 2022-05-25 17:50:35 +0900
Commit 3785525bcb0ef4645991f2473c0bcb8652ddabae (1 parent 7aec3685)
Implement googlepose API with webcam on HTML using sample code
Showing 1 changed file with 89 additions and 0 deletions
googlepose.html 0 → 100644
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Squat Page</title>
</head>
<body>
    <div>Teachable Machine Pose Model - Squat</div>
    <button type="button" onclick="init()">Start</button>
    <div><canvas id="canvas"></canvas></div>
    <div id="label-container"></div>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
    <script src="https://cdn.jsdelivr.net/npm/@teachablemachine/pose@0.8/dist/teachablemachine-pose.min.js"></script>
    <script type="text/javascript">
        // More API functions here:
        // https://github.com/googlecreativelab/teachablemachine-community/tree/master/libraries/pose

        // the link to your model provided by Teachable Machine export panel
        const URL = "https://teachablemachine.withgoogle.com/models/xymjZj4q-/"; // Temporary URL - trained on stand, squat, and bent (bent-back posture) classes.

        let model, webcam, ctx, labelContainer, maxPredictions;

        async function init() {
            const modelURL = URL + "model.json";
            const metadataURL = URL + "metadata.json";

            // load the model and metadata
            // Refer to tmImage.loadFromFiles() in the API to support files from a file picker
            // Note: the pose library adds a tmPose object to your window (window.tmPose)
            model = await tmPose.load(modelURL, metadataURL);
            maxPredictions = model.getTotalClasses();

            // Convenience function to setup a webcam
            const size = 200;
            const flip = true; // whether to flip the webcam
            webcam = new tmPose.Webcam(size, size, flip); // width, height, flip
            await webcam.setup(); // request access to the webcam
            await webcam.play();
            window.requestAnimationFrame(loop);

            // append/get elements to the DOM
            const canvas = document.getElementById("canvas");
            canvas.width = size;
            canvas.height = size;
            ctx = canvas.getContext("2d");
            labelContainer = document.getElementById("label-container");
            for (let i = 0; i < maxPredictions; i++) { // and class labels
                labelContainer.appendChild(document.createElement("div"));
            }
        }

        async function loop(timestamp) {
            webcam.update(); // update the webcam frame
            await predict();
            window.requestAnimationFrame(loop);
        }

        async function predict() {
            // Prediction #1: run input through posenet
            // estimatePose can take in an image, video or canvas html element
            const { pose, posenetOutput } = await model.estimatePose(webcam.canvas);
            // Prediction #2: run input through teachable machine classification model
            const prediction = await model.predict(posenetOutput);

            for (let i = 0; i < maxPredictions; i++) {
                const classPrediction =
                    prediction[i].className + ": " + prediction[i].probability.toFixed(2);
                labelContainer.childNodes[i].innerHTML = classPrediction;
            }

            // finally draw the poses
            drawPose(pose);
        }

        function drawPose(pose) {
            if (webcam.canvas) {
                ctx.drawImage(webcam.canvas, 0, 0);
                // draw the keypoints and skeleton
                if (pose) {
                    const minPartConfidence = 0.5;
                    tmPose.drawKeypoints(pose.keypoints, minPartConfidence, ctx);
                    tmPose.drawSkeleton(pose.keypoints, minPartConfidence, ctx);
                }
            }
        }
    </script>
</body>
</html>
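
Note: the per-class results returned by model.predict() could be consumed beyond simply rendering label text. The sketch below is a hypothetical follow-up, not part of this commit: it assumes the class names stand, squat, and bent mentioned in the comment above, and handlePrediction() is an illustrative name that would need to be called from predict() with the prediction array.

        // Hypothetical helper (not in this commit): pick the highest-probability
        // class from the array returned by model.predict() and react to it.
        function handlePrediction(prediction) {
            let top = prediction[0];
            for (let i = 1; i < prediction.length; i++) {
                if (prediction[i].probability > top.probability) {
                    top = prediction[i];
                }
            }
            // Warn when the bent-back class dominates; the 0.8 threshold is arbitrary.
            if (top.className === "bent" && top.probability > 0.8) {
                console.warn("Bent-back posture detected - consider straightening up.");
            }
            return top.className;
        }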