From df4ff645c5ae7751f8bec28238bd3566155abc6a Mon Sep 17 00:00:00 2001 From: Johannes Klein Date: Thu, 5 Dec 2024 17:14:28 +0100 Subject: [PATCH] Get altitude for user location; Add Geo model (#2492) * Also collect altitude from geolocation calls * Update geomodel plugin * Prop user location down into ai camera * Add geo model file to be linked into xcode project * Update env.example * Update env.example * Add geo model to cv model info file * Update Developer.js * Use user location for geo model while frame processing useGeoModel boolean signals the frame processing plugin to use the geo model as well. It is turned on when we have a location. * Update README.md * Geo model is optional * Adding a dummy neural network called geomodel.placeholder This model doesn't do anything, just returns the input. This is an overhead we could justify in my view as not to spend some more time figuring out how to make a model optional in an Xcode project. * Geo model files are optional * Update README.md * Fix wrong model usage * Update README.md * This comment is no longer true * feat: use full-text index on new _searchableName field (#2487) Supports searches across non-contiguous tokens, e.g. 
finding "Mount Diablo fairy lantern" with a query like "diablo lantern" Related to #2348 * feat: add pull to refresh on notifications (#2489) Closes #2349 * feat: show notifications from others (#2491) * split notifications into tabs * lots of TypeScript conversion * feat: resize Heading5 and add Heading6 (closes #2480) * fix: mark remote observations as viewed from ObsDetails * feat: show indicator in Notifications tabs if unviewed notifications Closes #2451 * feat: add option to save all observations from batch obs edit (#2493) * fix: bring KebabMenu in line with designs Closes #2268 * Update testing instructions * v0.55.2 * v0.55.2+129 * fix: show observation from external link when Explore filters open (#2453) * Open observation in app when link is pressed even if modals are open * Remove comments * Add comment and reset obsid * fix: show onboarding content when signed in user has 0 observations (#2510) Closes #2508 * fix: load new Explore results when filters change (#2507) URL params got removed from the query key as a part of pull-to-refresh, but that's how changes to filters were triggering a new request. This restores that behavior, adds a test to ensure changes to filters call the API again, and fixes the prior test of pull to refresh which was not testing the equivalent behavior when pulling to refresh (API on first load, so the mock needed to be cleared). * fix: restore observation fetch when focusing on myobs (#2511) * fix: center number in photo count (#2512) Closes #2506 * refactor: minor edits for clarity * fix: specify CV model version in e2e test Hoping this deflakes the e2e a bit. Not sure why the AICamera test ever worked without this. 
* v0.55.3 * v0.55.3+130 * New Crowdin translations by GitHub Action (#2481) Co-authored-by: Crowdin Bot * fix: get quality grade descs closer to designs; add Joanne to staff (#2517) * fix: prevent BottomSheet title from overflowing over close button * Update to Geomodel name * Another name change * Rename var and const names * Rename const file references * Also collect altitude from geolocation calls * Update geomodel plugin * Prop user location down into ai camera * Add geo model file to be linked into xcode project * Update env.example * Update env.example * Add geo model to cv model info file * Update Developer.js * Use user location for geo model while frame processing useGeoModel boolean signals the frame processing plugin to use the geo model as well. It is turned on when we have a location. * Update README.md * Geo model is optional * Adding a dummy neural network called geomodel.placeholder This model doesn't do anything, just returns the input. This is an overhead we could justify in my view as not to spend some more time figuring out how to make a model optional in an Xcode project. 
* Geo model files are optional * Update README.md * Fix wrong model usage * Update README.md * This comment is no longer true * refactor: minor edits for clarity * Update to Geomodel name * Another name change * Rename var and const names * Rename const file references --------- Co-authored-by: Ken-ichi Co-authored-by: Angie Co-authored-by: Crowdin Bot --- README.md | 7 ++++- env.example | 3 ++ ios/Podfile.lock | 4 +-- ios/geomodel.placeholder | Bin 0 -> 285 bytes .../project.pbxproj | 4 +++ ios/link-inat-model-files.sh | 6 ++++ package-lock.json | 4 +-- scripts/add-example-model.js | 5 ++++ scripts/add-github-actions-test-model.js | 3 ++ src/appConstants/e2e.ts | 4 +-- src/components/App.js | 2 +- src/components/Camera/AICamera/AICamera.js | 8 ++++-- .../Camera/AICamera/FrameProcessorCamera.js | 26 ++++++++++++++---- src/components/Camera/CameraContainer.tsx | 1 + src/components/Camera/CameraWithDevice.tsx | 8 ++++-- src/components/Developer/Developer.js | 10 ++++++- .../hooks/useOfflineSuggestions.js | 4 +-- src/sharedHelpers/fetchUserLocation.ts | 6 +++- src/sharedHelpers/{cvModel.ts => mlModel.ts} | 20 ++++++++++++-- src/sharedHooks/useWatchPosition.ts | 10 +++++-- 20 files changed, 108 insertions(+), 27 deletions(-) create mode 100644 ios/geomodel.placeholder rename src/sharedHelpers/{cvModel.ts => mlModel.ts} (78%) diff --git a/README.md b/README.md index 242ef19ff..dd14a6e33 100644 --- a/README.md +++ b/README.md @@ -18,11 +18,16 @@ See [CONTRIBUTING](CONTRIBUTING.md) for guidelines on contributing to this proje 1. Run `npx pod-install` or `cd ios && pod install` from the root directory 1. `cp env.example .env.staging` for staging and `cp env.example .env` for production and fill in appropriate values. This is not part of the code repo (contains secrets, such as OAuth client ID). 1. To run on Android, do this `cp android/example-keystore.properties android/keystore.properties`. Fill in the relevant values. 
If you are a member of iNat staff, get them from another member of iNat Staff. -1. Add AI Camera model and taxonomy files. The computer vision model files are not part of the code repo, and have to be installed. The app itself will load the model file with the filename specified in a .env file. On Android, the current file names are specified in these env variables `ANDROID_MODEL_FILE_NAME` and `ANDROID_TAXONOMY_FILE_NAME`. On iOS, the current file names are specified in these env variables `IOS_MODEL_FILE_NAME` and `IOS_TAXONOMY_FILE_NAME`. Currently, after a fresh clone of the repo, and copy of the env.example file, see above, you have to add the files following these steps: +1. Add AI Camera model and taxonomy files. The computer vision model and Geomodel files are not part of the code repo, and have to be installed. The app itself will load the model file with the filename specified in a .env file. On Android, the current file names are specified in these env variables `ANDROID_MODEL_FILE_NAME`, `ANDROID_TAXONOMY_FILE_NAME`, and `ANDROID_GEOMODEL_FILE_NAME`. On iOS, the current file names are specified in these env variables `IOS_MODEL_FILE_NAME`, `IOS_TAXONOMY_FILE_NAME`, and `IOS_GEOMODEL_FILE_NAME`. After a fresh clone of the repo and copying the env.example file (see above), you have to add the files by following these steps: 1. Add the example model files by executing `npm run add-example-model`. If that does not work continue with the next step. 1. If the download script fails: The sample model files are available in this [`small_model.zip`](https://github.com/inaturalist/SeekReactNative/releases/tag/v2.9.1-138) file. 1. On Android, these files are named `small_inception_tf1.tflite` and `small_export_tax.csv`. Create a camera folder within Android assets (i.e. `android/app/src/debug/assets/camera`) and place the files there. 1. On iOS, these files are named `small_inception_tf1.mlmodel` and `small_export_tax.json` and should be added to the `ios` folder. 
+ 1. On iOS, in the `ios` folder, copy the Geomodel placeholder file `geomodel.placeholder` to `geomodel.mlmodel`. This file is just a placeholder to get the app to build. We'll release a functional Geomodel for development purposes soon. +1. Optional: Add Geomodel file. If you have access to the model file, you can add it by following these steps: + 1. Uncomment and set the `ANDROID_GEOMODEL_FILE_NAME` and `IOS_GEOMODEL_FILE_NAME` variables in the `.env` and `.env.staging` files. + 1. On Android, the Geomodel file should be placed in the `android/app/src/debug/assets/camera` folder. + 1. On iOS, the Geomodel file should be placed in the `ios` folder. ### Set up pre-commit hooks diff --git a/env.example b/env.example index 94dea29ec..4b4e89d13 100644 --- a/env.example +++ b/env.example @@ -24,8 +24,11 @@ GMAPS_API_KEY=some-key # here. ANDROID_MODEL_FILE_NAME=small_inception_tf1.tflite ANDROID_TAXONOMY_FILE_NAME=small_export_tax.csv +# Android Geomodel is not implemented yet, uncommenting this can cause unforeseen problems +#ANDROID_GEOMODEL_FILE_NAME=small_geomodel_tf1.tflite IOS_MODEL_FILE_NAME=small_inception_tf1.mlmodel IOS_TAXONOMY_FILE_NAME=small_export_tax.json +#IOS_GEOMODEL_FILE_NAME=small_geomodel_tf1.mlmodel CV_MODEL_VERSION=1.0 # Fastlane diff --git a/ios/Podfile.lock b/ios/Podfile.lock index 9b3446711..817ed3907 100644 --- a/ios/Podfile.lock +++ b/ios/Podfile.lock @@ -1181,7 +1181,7 @@ PODS: - VisionCamera/React (4.0.5): - React-Core - VisionCamera/FrameProcessors - - VisionCameraPluginInatVision (4.1.0): + - VisionCameraPluginInatVision (4.1.1): - React-Core - Yoga (1.14.0) @@ -1565,7 +1565,7 @@ SPEC CHECKSUMS: RNVectorIcons: 73ab573085f65a572d3b6233e68996d4707fd505 SocketRocket: f32cd54efbe0f095c4d7594881e52619cfe80b17 VisionCamera: 4c1d19f1ac09f2f42f758e306fcf642536627357 - VisionCameraPluginInatVision: ad0412249baa14ad9660e2c536f5370e6029deb2 + VisionCameraPluginInatVision: ddf079246f189a7a97fdb1298ce8be8d11f259a8 Yoga: 
c716aea2ee01df6258550c7505fa61b248145ced PODFILE CHECKSUM: eff4b75123af5d6680139a78c055b44ad37c269b diff --git a/ios/geomodel.placeholder b/ios/geomodel.placeholder new file mode 100644 index 0000000000000000000000000000000000000000..7638d7567896f76764d9a941b2fa78f7d20f7f21 GIT binary patch literal 285 zcmd;J5xU68CCtT|nO9I+BE_Y}!NtVLD6pV`C5Rs;$fm^t5?I8xoN*DGC6{t?ey(16 zW=TeAl3rp#K~Ab(a(+>2Zca&leonDoaeir0a;lI}Nq$jshOMoUo~fRpc5MgX zZ7vnOddpIaiZk=`gqSV#fU2#ywDGEj*c6|VnV6HBTq49B0= 18.0.0" diff --git a/scripts/add-example-model.js b/scripts/add-example-model.js index c3c2b258b..0570cc292 100644 --- a/scripts/add-example-model.js +++ b/scripts/add-example-model.js @@ -52,6 +52,11 @@ const iosDestinationPath = path.join( __dirname, "..", "ios" ); await fs.mkdir( iosDestinationPath, { recursive: true } ); await fs.copyFile( iosModelPath, path.join( iosDestinationPath, iosModelFile ) ); await fs.copyFile( iosTaxonomyPath, path.join( iosDestinationPath, iosTaxonomyFile ) ); + console.log( "Copying Geomodel placeholder to be model file..." ); + await fs.copyFile( + path.join( iosDestinationPath, "geomodel.placeholder" ), + path.join( iosDestinationPath, "geomodel.mlmodel" ) + ); console.log( "Delete temp model folder and its contents..." 
); await fs.rm( modelPath, { recursive: true } ); diff --git a/scripts/add-github-actions-test-model.js b/scripts/add-github-actions-test-model.js index 43be81f5e..67b0d88cf 100644 --- a/scripts/add-github-actions-test-model.js +++ b/scripts/add-github-actions-test-model.js @@ -50,6 +50,9 @@ const iosDestinationPath = path.join( __dirname, "..", "ios" ); const androidTaxonomyFile = process.env.ANDROID_TAXONOMY_FILE_NAME; const iosModelFile = process.env.IOS_MODEL_FILE_NAME; const iosTaxonomyFile = process.env.IOS_TAXONOMY_FILE_NAME; + // TODO: download an example Geomodel from the internet + // NEEDS: https://github.com/inaturalist/iNaturalistMLWork/issues/146 + // const iosGeoModelFile = process.env.IOS_GEOMODEL_FILE_NAME; console.log( "Copying model files to assets folder..." ); await fs.mkdir( androidDestinationPath, { recursive: true } ); diff --git a/src/appConstants/e2e.ts b/src/appConstants/e2e.ts index cdc9379f3..c51d466fd 100644 --- a/src/appConstants/e2e.ts +++ b/src/appConstants/e2e.ts @@ -13,8 +13,8 @@ export const CHUCKS_PAD = { latitude: 51.3313127, longitude: 0.0509862, accuracy: 5, - altitude: null, - altitudeAccuracy: null, + altitude: 120.0234, + altitudeAccuracy: 2.123, heading: null, speed: null }; diff --git a/src/components/App.js b/src/components/App.js index e6b12aa8b..f9f951616 100644 --- a/src/components/App.js +++ b/src/components/App.js @@ -11,8 +11,8 @@ import React, { useEffect, useState } from "react"; import { LogBox } from "react-native"; import Realm from "realm"; import clearCaches from "sharedHelpers/clearCaches.ts"; -import { addARCameraFiles } from "sharedHelpers/cvModel.ts"; import { log } from "sharedHelpers/logger"; +import { addARCameraFiles } from "sharedHelpers/mlModel.ts"; import { useCurrentUser, useIconicTaxa, diff --git a/src/components/Camera/AICamera/AICamera.js b/src/components/Camera/AICamera/AICamera.js index 15308dd7a..a5dca4907 100644 --- a/src/components/Camera/AICamera/AICamera.js +++ 
b/src/components/Camera/AICamera/AICamera.js @@ -17,6 +17,7 @@ import { useDebugMode, usePerformance, useTranslation } from "sharedHooks"; import { isDebugMode } from "sharedHooks/useDebugMode"; +// import type { UserLocation } from "sharedHooks/useWatchPosition"; import colors from "styles/tailwindColors"; import { @@ -52,7 +53,8 @@ type Props = { takingPhoto: boolean, takePhotoAndStoreUri: Function, takePhotoOptions: Object, - setAiSuggestion: Function + setAiSuggestion: Function, + userLocation?: Object // UserLocation | null }; const AICamera = ( { @@ -65,7 +67,8 @@ const AICamera = ( { takingPhoto, takePhotoAndStoreUri, takePhotoOptions, - setAiSuggestion + setAiSuggestion, + userLocation }: Props ): Node => { const hasFlash = device?.hasFlash; const { isDebug } = useDebugMode( ); @@ -154,6 +157,7 @@ const AICamera = ( { takingPhoto={takingPhoto} inactive={inactive} resetCameraOnFocus={resetCameraOnFocus} + userLocation={userLocation} /> )} diff --git a/src/components/Camera/AICamera/FrameProcessorCamera.js b/src/components/Camera/AICamera/FrameProcessorCamera.js index f4b8aac3c..735f73a34 100644 --- a/src/components/Camera/AICamera/FrameProcessorCamera.js +++ b/src/components/Camera/AICamera/FrameProcessorCamera.js @@ -12,12 +12,18 @@ import React, { } from "react"; import { Platform } from "react-native"; import { Worklets } from "react-native-worklets-core"; -import { modelPath, modelVersion, taxonomyPath } from "sharedHelpers/cvModel.ts"; +import { + geomodelPath, + modelPath, + modelVersion, + taxonomyPath +} from "sharedHelpers/mlModel.ts"; import { orientationPatchFrameProcessor, usePatchedRunAsync } from "sharedHelpers/visionCameraPatches"; import { useDeviceOrientation } from "sharedHooks"; +// import type { UserLocation } from "sharedHooks/useWatchPosition"; type Props = { // $FlowIgnore @@ -38,7 +44,8 @@ type Props = { takingPhoto: boolean, taxonomyRollupCutoff?: number, inactive?: boolean, - resetCameraOnFocus: Function + resetCameraOnFocus: 
Function, + userLocation?: Object // UserLocation | null }; const DEFAULT_FPS = 1; @@ -67,7 +74,8 @@ const FrameProcessorCamera = ( { takingPhoto, taxonomyRollupCutoff = DEFAULT_TAXONOMY_CUTOFF_THRESHOLD, inactive, - resetCameraOnFocus + resetCameraOnFocus, + userLocation }: Props ): Node => { const { deviceOrientation } = useDeviceOrientation(); const [lastTimestamp, setLastTimestamp] = useState( undefined ); @@ -155,7 +163,14 @@ const FrameProcessorCamera = ( { taxonomyRollupCutoff, numStoredResults, cropRatio, - patchedOrientationAndroid + patchedOrientationAndroid, + useGeoModel: !!userLocation, + geomodelPath, + location: { + latitude: userLocation?.latitude, + longitude: userLocation?.longitude, + elevation: userLocation?.altitude + } } ); const timeAfter = Date.now(); const timeTaken = timeAfter - timeBefore; @@ -176,7 +191,8 @@ const FrameProcessorCamera = ( { numStoredResults, cropRatio, lastTimestamp, - fps + fps, + userLocation ] ); diff --git a/src/components/Camera/CameraContainer.tsx b/src/components/Camera/CameraContainer.tsx index a7a124596..69f93751c 100644 --- a/src/components/Camera/CameraContainer.tsx +++ b/src/components/Camera/CameraContainer.tsx @@ -206,6 +206,7 @@ const CameraContainer = ( ) => { newPhotoUris={newPhotoUris} setNewPhotoUris={setNewPhotoUris} setAiSuggestion={setAiSuggestion} + userLocation={userLocation} /> {showPhotoPermissionsGate && renderSavePhotoPermissionGate( { // If the user does not give location permissions in any form, diff --git a/src/components/Camera/CameraWithDevice.tsx b/src/components/Camera/CameraWithDevice.tsx index b5a89f247..3e55e9fcb 100644 --- a/src/components/Camera/CameraWithDevice.tsx +++ b/src/components/Camera/CameraWithDevice.tsx @@ -3,6 +3,7 @@ import React from "react"; import DeviceInfo from "react-native-device-info"; import type { CameraDevice } from "react-native-vision-camera"; import useDeviceOrientation from "sharedHooks/useDeviceOrientation.ts"; +import { UserLocation } from 
"sharedHooks/useWatchPosition.ts"; import AICamera from "./AICamera/AICamera"; import StandardCamera from "./StandardCamera/StandardCamera"; @@ -21,7 +22,8 @@ interface Props { newPhotoUris: Array, setNewPhotoUris: Function, takePhotoOptions: Object, - setAiSuggestion: Function + setAiSuggestion: Function, + userLocation: UserLocation | null } const CameraWithDevice = ( { @@ -36,7 +38,8 @@ const CameraWithDevice = ( { newPhotoUris, setNewPhotoUris, takePhotoOptions, - setAiSuggestion + setAiSuggestion, + userLocation }: Props ) => { const { isLandscapeMode } = useDeviceOrientation( ); const flexDirection = isTablet && isLandscapeMode @@ -76,6 +79,7 @@ const CameraWithDevice = ( { takePhotoAndStoreUri={takePhotoAndStoreUri} takePhotoOptions={takePhotoOptions} setAiSuggestion={setAiSuggestion} + userLocation={userLocation} /> )} diff --git a/src/components/Developer/Developer.js b/src/components/Developer/Developer.js index e2fdc6ad7..f196fe73b 100644 --- a/src/components/Developer/Developer.js +++ b/src/components/Developer/Developer.js @@ -45,6 +45,10 @@ const taxonomyFileName = Platform.select( { ios: Config.IOS_TAXONOMY_FILE_NAME, android: Config.ANDROID_TAXONOMY_FILE_NAME } ); +const geomodelFileName = Platform.select( { + ios: Config.IOS_GEOMODEL_FILE_NAME, + android: Config.ANDROID_GEOMODEL_FILE_NAME +} ); /* eslint-disable i18next/no-literal-string */ const Developer = (): Node => { @@ -151,10 +155,14 @@ const Developer = (): Node => { Model: {modelFileName} - + Taxonomy: {taxonomyFileName} + + Geomodel: + {geomodelFileName} +

Paths

Documents

diff --git a/src/components/Suggestions/hooks/useOfflineSuggestions.js b/src/components/Suggestions/hooks/useOfflineSuggestions.js index 8c36451d1..4d4535921 100644 --- a/src/components/Suggestions/hooks/useOfflineSuggestions.js +++ b/src/components/Suggestions/hooks/useOfflineSuggestions.js @@ -5,8 +5,8 @@ import { useEffect, useState } from "react"; -import { predictImage } from "sharedHelpers/cvModel.ts"; import { log } from "sharedHelpers/logger"; +import { predictImage } from "sharedHelpers/mlModel.ts"; const logger = log.extend( "useOfflineSuggestions" ); @@ -29,8 +29,6 @@ const useOfflineSuggestions = ( let rawPredictions = []; try { const result = await predictImage( selectedPhotoUri ); - // Android returns an object with a predictions key, while iOS returns an array because - // currently Seek codebase as well expects different return types for each platform rawPredictions = result.predictions; } catch ( predictImageError ) { dispatch( { type: "SET_FETCH_STATUS", fetchStatus: "offline-error" } ); diff --git a/src/sharedHelpers/fetchUserLocation.ts b/src/sharedHelpers/fetchUserLocation.ts index 64cb2dc49..030982406 100644 --- a/src/sharedHelpers/fetchUserLocation.ts +++ b/src/sharedHelpers/fetchUserLocation.ts @@ -32,6 +32,8 @@ interface UserLocation { latitude: number; longitude: number; positional_accuracy: number; + altitude: number | null; + altitudinal_accuracy: number | null; } const fetchCoarseUserLocation = async ( ): Promise => { @@ -49,7 +51,9 @@ const fetchCoarseUserLocation = async ( ): Promise => { const userLocation = { latitude: coords.latitude, longitude: coords.longitude, - positional_accuracy: coords.accuracy + positional_accuracy: coords.accuracy, + altitude: coords.altitude, + altitudinal_accuracy: coords.altitudeAccuracy }; return userLocation; } catch ( e ) { diff --git a/src/sharedHelpers/cvModel.ts b/src/sharedHelpers/mlModel.ts similarity index 78% rename from src/sharedHelpers/cvModel.ts rename to src/sharedHelpers/mlModel.ts index 
88ffe6323..57f96178e 100644 --- a/src/sharedHelpers/cvModel.ts +++ b/src/sharedHelpers/mlModel.ts @@ -2,7 +2,8 @@ import i18next from "i18next"; import { Alert, Platform } from "react-native"; import Config from "react-native-config"; import RNFS from "react-native-fs"; -import { getPredictionsForImage } from "vision-camera-plugin-inatvision"; +import type { Location } from "vision-camera-plugin-inatvision"; +import { getPredictionsForImage, getPredictionsForLocation } from "vision-camera-plugin-inatvision"; const modelFiles = { // The iOS model and taxonomy files always have to be referenced in the @@ -12,8 +13,10 @@ const modelFiles = { // build phase script. See ios/link-inat-model-files.sh IOSMODEL: "cvmodel.mlmodelc", IOSTAXONOMY: "taxonomy.json", + IOSGEOMODEL: "geomodel.mlmodelc", ANDROIDMODEL: Config.ANDROID_MODEL_FILE_NAME, - ANDROIDTAXONOMY: Config.ANDROID_TAXONOMY_FILE_NAME + ANDROIDTAXONOMY: Config.ANDROID_TAXONOMY_FILE_NAME, + ANDROIDGEOMODEL: Config.ANDROID_GEOMODEL_FILE_NAME }; export const modelPath: string = Platform.select( { @@ -26,6 +29,11 @@ export const taxonomyPath: string = Platform.select( { android: `${RNFS.DocumentDirectoryPath}/${modelFiles.ANDROIDTAXONOMY}` } ); +export const geomodelPath: string = Platform.select( { + ios: `${RNFS.MainBundlePath}/${modelFiles.IOSGEOMODEL}`, + android: `${RNFS.DocumentDirectoryPath}/${modelFiles.ANDROIDGEOMODEL}` +} ); + export const modelVersion = Config.CV_MODEL_VERSION; export const predictImage = ( uri: string ) => { @@ -53,6 +61,12 @@ export const predictImage = ( uri: string ) => { } ); }; +export const predictLocation = ( location: Location ) => getPredictionsForLocation( { + geomodelPath, + taxonomyPath, + location +} ); + const addCameraFilesAndroid = () => { const copyFilesAndroid = ( source, destination ) => { RNFS.copyFileAssets( source, destination ) @@ -70,6 +84,7 @@ const addCameraFilesAndroid = () => { RNFS.readDirAssets( "camera" ).then( results => { const model = 
modelFiles.ANDROIDMODEL; const taxonomy = modelFiles.ANDROIDTAXONOMY; + const geomodel = modelFiles.ANDROIDGEOMODEL; const hasModel = results.find( r => r.name === model ); @@ -77,6 +92,7 @@ const addCameraFilesAndroid = () => { if ( hasModel !== undefined ) { copyFilesAndroid( `camera/${model}`, modelPath ); copyFilesAndroid( `camera/${taxonomy}`, taxonomyPath ); + copyFilesAndroid( `camera/${geomodel}`, geomodelPath ); } else { Alert.alert( i18next.t( "No-model-found" ), diff --git a/src/sharedHooks/useWatchPosition.ts b/src/sharedHooks/useWatchPosition.ts index 689b88893..1ec581a3e 100644 --- a/src/sharedHooks/useWatchPosition.ts +++ b/src/sharedHooks/useWatchPosition.ts @@ -12,10 +12,12 @@ import { clearWatch, watchPosition } from "../sharedHelpers/geolocationWrapper"; export const TARGET_POSITIONAL_ACCURACY = 10; const MAX_POSITION_AGE_MS = 60_000; -interface UserLocation { +export interface UserLocation { latitude: number, longitude: number, - positional_accuracy: number + positional_accuracy: number, + altitude: number | null, + altitudinal_accuracy: number | null } const geolocationOptions = { @@ -74,7 +76,9 @@ const useWatchPosition = ( options: { const newLocation = { latitude: currentPosition?.coords?.latitude, longitude: currentPosition?.coords?.longitude, - positional_accuracy: currentPosition?.coords?.accuracy + positional_accuracy: currentPosition?.coords?.accuracy, + altitude: currentPosition?.coords?.altitude, + altitudinal_accuracy: currentPosition?.coords?.altitudeAccuracy }; setUserLocation( newLocation ); if (