      <div>
        {!this.props.isGeolocationAvailable && (
          <div>Your browser does not support Geolocation</div>
        )}

        {!this.props.isGeolocationEnabled && (
          <div>Geolocation is not enabled</div>
        )}

        {this.props.coords && (
          <table>
            <tbody>
              <tr>
                <td>latitude</td>
                <td>{this.props.coords.latitude}</td>
              </tr>
              <tr>
                <td>longitude</td>
                <td>{this.props.coords.longitude}</td>
              </tr>
            </tbody>
          </table>
        )}

        {!this.props.coords && <div>Getting the location data…</div>}
      </div>
    );
  }
}

export default geolocated({
  positionOptions: {
    enableHighAccuracy: true,
  },
  // userDecisionTimeout: 5000,
  watchPosition: false,
})(GPSInput);
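// Usage sketch for the geolocated-wrapped GPSInput above. The HOC injects
// `isGeolocationAvailable`, `isGeolocationEnabled` and `coords` as props at
// runtime, so the component is rendered without passing them explicitly.
// The parent component name below is illustrative only, not part of this code base.
//
//   import GpsInput from "../gps-input";
//
//   const LocationPanel = () => (
//     <div>
//       {/* coords are resolved by the browser Geolocation API via the HOC */}
//       <GpsInput />
//     </div>
//   );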
import React, { createRef, useCallback, useEffect, useState } from "react";
import { Flex, Image, Button, Text, Dropdown, Loader } from "@fluentui/react-northstar";
import { useTranslation } from 'react-i18next';
import { AttendanceImageConfig, ClockingInput, ClockingType, FaceRecognitionConfig, Result, UserAttendanceImageConfig, UserFaceRecognitionConfig, WorkplaceConfig } from "../../models";
import { TextField } from "@fluentui/react";
import { Dialog } from "@fluentui/react-northstar";
import Icons from "../../assests/images/SVGIcons/svgIcons";
import { getWorkplaces } from "../../api/Company";
import { isLocInsideWorkplaces } from "../../utils";
import * as AttendanceApi from "../../api/attendance";
import GpsInput from "../gps-input";
import * as faceapi from 'face-api.js';
import WebCam from "react-webcam";
import { FaceMatcher } from "face-api.js";
import { dayStatusColor } from "../../assests/styles/colors";

// Converts a base64 data URL (e.g. a webcam screenshot) into a File object
// so it can be uploaded with the clocking request.
function dataURLtoFile(dataurl: string, filename: string) {
  const arr = dataurl.split(",");
  const mime = (arr[0].match(/:(.*?);/) ?? [])[1];
  const bstr = atob(arr[1]);
  let n = bstr.length;
  const u8arr = new Uint8Array(n);

  while (n--) {
    u8arr[n] = bstr.charCodeAt(n);
  }

  return new File([u8arr], filename, { type: mime });
}

export default ({ user, updateParent, gpsRef, closeDialog, dialogVisible,
  clockingType, faceMatcher, primaryColor, faceRecognitionRequired }: any) => {

  const { t } = useTranslation();
  //const [dialogVisible, setDialogVisible] = useState(false);
  const [input, setInput] = useState("" as unknown as ClockingInput);
  const [loading, setLoading] = useState(false)
  const [error, setError] = useState("");
  const [errorDialogOpen, setErrorDialogOpen] = useState(false)
  const [errorDialogContent, setErrorDialogContent] = useState("")
  const [errorDialogCancelAllowed, setErrorDialogCancelAllowed] = useState(false)
  const [errorDialogConfirmButtonContent, setErrorDialogConfirmButtonContent] = useState("")
  const [errorDialogCancelButtonContent, setErrorDialogCancelButtonContent] = useState("")
  const [faceDetected, setFaceDetected] = useState("unknown")
  const [count, setCount] = useState(0);
  const webcamRef = React.useRef(null);

  // An attendance photo is required unless the user (or, when inherited, the
  // company) configuration explicitly marks it as not required.
  var attendanceImageRequired = true
  if (user.attendanceImageConfig === UserAttendanceImageConfig.InheritedFromCompany) {
    if (user.company?.attendanceImageConfig === AttendanceImageConfig.NotRequired)
      attendanceImageRequired = false
  } else {
    if (user.attendanceImageConfig === UserAttendanceImageConfig.NotRequired)
      attendanceImageRequired = false
  }

  const eyesArray = [
    ,
    ,
  ]

  useEffect(() => {
    const timerId = setInterval(() => {
      // Use a functional state update to correctly increment the count
      setCount(count => count + 1);
    }, 2000);

    return () => clearInterval(timerId);
  }, []);

  const image = eyesArray[count % eyesArray.length];
  const detected = ;

  const detectAndMatchFaces = async () => {

    try {
      const video: any = document.getElementById('cameraFeed');
      const canvas: any = document.getElementById('reflay');
      if (video && canvas) {
        // Detect every face in the current video frame and compute its descriptor.
        var faceDescriptions = await faceapi
          .detectAllFaces(video)
          .withFaceLandmarks()
          .withFaceDescriptors();
        canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);

        // Match each detected face against the labeled descriptors held by faceMatcher.
        const results: any = faceDescriptions.map((fd: any) => faceMatcher?.findBestMatch(fd.descriptor));

        if (results.length == 0) {
          setFaceDetected("unknown")
        }
        results.forEach((bestMatch: any, i: any) => {
          setFaceDetected("unknown")
          const box = faceDescriptions[i].detection.box;
          if (box && box.x !== null && box.y !== null &&
            box.width !== null && box.height !== null) {
            const anchor = { x: box.x, y: box.y - 60 };
            const drawOptions: any = {
              anchorPosition: 'TOP_LEFT',
              backgroundColor: primaryColor
            };
            if (bestMatch != undefined) {
              // Draw the best-match label above the detected face.
              const text = bestMatch.toString().split(' ')[0];
              const drawText = new faceapi.draw.DrawTextField(text, anchor, drawOptions);
              if (video && canvas) {
                drawText.draw(canvas);
              }

              var label: never = bestMatch.toString().split(' ')[0] as never
              //setFaceDetected("")
              if (label != 'unknown') {
                setFaceDetected(label)
              }
            }
          }
        });
        if (video && canvas) {
          // Keep detecting on every animation frame while the camera feed is visible.
          const frameCallback: FrameRequestCallback = (timestamp: DOMHighResTimeStamp) => {
            detectAndMatchFaces();
          };
          requestAnimationFrame(frameCallback);
          return results;
        }
      }
    } catch (error: any) {
      console.log(error.toString())
    }

  }

  const AddListener = async () => {
    const video: any = document.getElementById('cameraFeed');
    video.play();
    if (!video.paused) {
      return detectAndMatchFaces();
    }
  }

  const onErrorDialogCancel = () => {
    user.getUserInfo();
    updateParent()
    setErrorDialogOpen(false)
    setErrorDialogContent('')
    setErrorDialogConfirmButtonContent(t('OK'))
    setErrorDialogCancelButtonContent(t('Cancel'))
  }

  const onErrorDialogOk = () => {
    user.getUserInfo();
    updateParent()
    setErrorDialogOpen(false)
    setErrorDialogContent('')
    setErrorDialogConfirmButtonContent(t('OK'))
    setErrorDialogCancelButtonContent(t('Cancel'))
    closeClockingDialog();
  }

  const closeClockingDialog = () => {
    closeDialog()
  }

  const cancel = async () => {

    try {
      console.log("close dialog")

      user.loading = true;
      await user.getUserInfo();
      await updateParent()
      const video: any = document.getElementById('cameraFeed');
      const canvas: any = document.getElementById('reflay');
      video.remove()
      canvas.remove()
      closeClockingDialog();
    } catch (error: any) {
      console.log(error.toString())
      setLoading(false)
      return

    }

  }
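  // Sketch of how the `faceMatcher` prop consumed by detectAndMatchFaces above
  // can be built with face-api.js. This is illustrative only: the model URL,
  // the reference image source and the helper name are assumptions, not part
  // of this component.
  //
  //   async function buildFaceMatcher(referenceImage: HTMLImageElement, label: string) {
  //     // detection, landmark and recognition models must be loaded first
  //     await faceapi.nets.ssdMobilenetv1.loadFromUri('/models');   // assumed model path
  //     await faceapi.nets.faceLandmark68Net.loadFromUri('/models');
  //     await faceapi.nets.faceRecognitionNet.loadFromUri('/models');
  //
  //     const detection = await faceapi
  //       .detectSingleFace(referenceImage)
  //       .withFaceLandmarks()
  //       .withFaceDescriptor();
  //     if (!detection) throw new Error('no face found in reference image');
  //
  //     const labeled = new faceapi.LabeledFaceDescriptors(label, [detection.descriptor]);
  //     return new faceapi.FaceMatcher(labeled, 0.6); // 0.6 is the library's default distance threshold
  //   }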
  const clock = async () => {
    setLoading(true)
    try {
      // image: only capture a webcam screenshot when the attendance-image
      // configuration requires it.
      var showCam = webcamRef.current ? true : false
      if (user.attendanceImageConfig === UserAttendanceImageConfig.InheritedFromCompany) {
        if (user.company?.attendanceImageConfig === AttendanceImageConfig.NotRequired)
          showCam = false
      } else {
        if (user.attendanceImageConfig === UserAttendanceImageConfig.NotRequired)
          showCam = false
      }
      var image = null;
      if (showCam) {
        if (webcamRef && webcamRef.current) {
          if (faceRecognitionRequired == true) {
            // The detected label must match the user's first name before clocking.
            if (faceDetected.trim() != user.firstName.trim()) {
              throw new Error("face not recognized");
            }
          }
          const capture = (webcamRef as any).current.getScreenshot();
          if (capture) {
            image = dataURLtoFile(capture, "image.png");
          } else {
            throw new Error("Image was null");
          }
        }

      }

      // workplace: when clocking is restricted to company or user-specific
      // workplaces, reject coordinates outside every configured workplace.
      if (user.workplaceConfig.toString() === WorkplaceConfig.CompanyOrUserSpecific) {
        var workplaces = await getWorkplaces(user?.company?.id ?? 0, user.userId);
        if (workplaces.payload.length > 0) {
          var insideWorkplace = isLocInsideWorkplaces({ lat: input.latitude, lng: input.longitude }, workplaces.payload)
          if (!insideWorkplace) {
            setError(t("Not allowed to clock outside the workplace bounds"))
            setLoading(false)
            return;
          }
        }
      }
      await user.clock(clockingType, { ...input, image });
      setInput({ ...input, comments: "" })
      setLoading(false)
      //hone
      console.log("close dialog")
      closeClockingDialog();
      user.loading = true;
      await user.getUserInfo();
      await updateParent()
    } catch (error: any) {
      console.log(error.toString())
      if (error.toString().trim() == "Error: face not recognized") {
        setError(t("Face not recognized"))
        setLoading(false)
        return
      }
      if (error && error.errors && error.errors.length > 0 && error.errors[0].description) {
        if (error.errors[0].path && error.errors[0].path.toLowerCase() === 'type') {
          setErrorDialogOpen(true)
          setErrorDialogContent(t(error.errors[0].description))
        }
        setError(t(error.errors[0].description))
      } else {
        setError(t("Please allow the browser to use camera"))
      }
      setLoading(false)
    }

  };
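  // The JSX returned below renders the clocking dialog. A minimal sketch of the
  // webcam portion it relies on (assumed markup; the element ids match the
  // lookups in detectAndMatchFaces and the screenshot call in clock above,
  // the onUserMedia wiring is an assumption):
  //
  //   <WebCam
  //     id="cameraFeed"                // queried via document.getElementById('cameraFeed')
  //     ref={webcamRef}                // used for getScreenshot() in clock()
  //     screenshotFormat="image/png"   // screenshots are converted with dataURLtoFile
  //     onUserMedia={AddListener}      // assumption: starts the detection loop
  //   />
  //   <canvas id="reflay" />           // overlay where match labels are drawn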
  return (
    <>
      <>
        
          }
          content={