handleMemoryPressure crashes apps on iOS with EXC_BAD_ACCESS #2675
Replies: 4 comments
---
Noticed that every time I mount the component there's a spike in memory, but when I unmount it the memory isn't released. I tried to trigger GC on unmount, but no luck releasing the memory.
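For reference, a minimal sketch of the GC-on-unmount attempt, assuming the Hermes runtime exposes a global gc() hook (it may be undefined, especially in release builds; the hook name below is illustrative):

```tsx
import { useEffect } from 'react'

// Minimal sketch: request a GC pass when the host component unmounts.
// `useGcOnUnmount` is an illustrative name; global.gc may not exist.
export function useGcOnUnmount() {
  useEffect(() => {
    return () => {
      (global as any).gc?.()
    }
  }, [])
}
```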
---
Frame processor:

```tsx
import { useAppState } from '@react-native-community/hooks'
import { useIsFocused } from '@react-navigation/native'
import { PaintStyle, Skia } from '@shopify/react-native-skia'
import type { SkPaint } from '@shopify/react-native-skia'
import { useEffect, useMemo, useRef, useState } from 'react'
import { Platform, Text, View } from 'react-native'
import { ColorConversionCodes, DataTypes, ObjectType, OpenCV } from 'react-native-fast-opencv'
import Reanimated, {
useAnimatedReaction,
useSharedValue as useReanimatedSharedValue,
withDelay,
withRepeat,
withTiming,
} from 'react-native-reanimated'
import { useSafeAreaInsets } from 'react-native-safe-area-context'
import {
Camera,
runAtTargetFps,
useCameraDevice,
useCameraPermission,
useFrameProcessor,
useSkiaFrameProcessor,
} from 'react-native-vision-camera'
import { useFaceDetector } from 'react-native-vision-camera-face-detector'
import { useSharedValue as useWorkletSharedValue, Worklets } from 'react-native-worklets-core'
import { useResizePlugin } from 'vision-camera-resize-plugin'
import { UiButton, UiImage } from '@/ui'
Reanimated.addWhitelistedNativeProps({
zoom: true,
})
const ReanimatedCamera = Reanimated.createAnimatedComponent(Camera)
const CROP_SIZE = 48
const PIXEL_FORMAT = 'bgra'
const PIXELS_PER_ROW = 4 // used as the channel count for frameBufferToMat (BGRA = 4)
const DATA_TYPE = 'uint8'
const OPENCV_DATA_TYPE = DataTypes.CV_8U
const COLOR_CONVERSION_CODES = ColorConversionCodes.COLOR_BGRA2GRAY
type Props = {
onFaceResized: (image: Uint8Array<ArrayBufferLike>) => void
}
export default function ScanFaceCamera({ onFaceResized }: Props) {
const insets = useSafeAreaInsets()
const isFocused = useIsFocused()
const currentAppState = useAppState()
const device = useCameraDevice('front')
const { hasPermission, requestPermission } = useCameraPermission()
// const zoom = useReanimatedSharedValue(device?.neutralZoom ?? 1)
//
// const zoomAnimatedProps = useAnimatedProps<CameraProps>(
// () => ({
// zoom: interpolate(
// 1,
// [1, 10],
// [device?.minZoom ?? 0.5, device?.maxZoom ?? 2],
// Extrapolation.CLAMP,
// ),
// }),
// [zoom],
// )
const scanProgress = useReanimatedSharedValue(0)
useEffect(() => {
scanProgress.value = withDelay(
500,
withRepeat(
withTiming(1, {
duration: 2000,
}),
Infinity, // Infinite repeat
// true, // Reverse direction
),
)
}, [])
const scanProgressWorkletSharedValue = useWorkletSharedValue(0)
useAnimatedReaction(
() => scanProgress.value,
value => {
scanProgressWorkletSharedValue.value = value
},
)
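// Workaround: attach the frame processor only after a short delay;
// attaching it immediately on mount made the app crash on iOS.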
const [initializationDelay, setInitializationDelay] = useState(false)
useEffect(() => {
const timeout = setTimeout(() => {
setInitializationDelay(true)
}, 1000)
return () => clearTimeout(timeout)
}, [])
const camera = useRef<Camera>(null)
const { detectFaces } = useFaceDetector({
performanceMode: 'fast',
classificationMode: 'all',
landmarkMode: 'none',
contourMode: 'none',
})
const { resize } = useResizePlugin()
const faceContainerPaints = useWorkletSharedValue<Record<number, SkPaint>>({})
const [previewImage, setPreviewImage] = useState('')
const updatePreviewImage = useMemo(
() =>
Worklets.createRunOnJS((dataBase64: string) => {
setPreviewImage(`data:image/png;base64,${dataBase64}`)
}),
[],
)
const frameProcessor = useSkiaFrameProcessor(
frame => {
'worklet'
frame.render()
const _faces = detectFaces(frame)
_faces.forEach((face, idx) => {
const faceContainerPath = Skia.Path.Make()
if (face.bounds) {
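// NOTE: Skia.XYWHRect expects (x, y, width, height); bounds height and
// width are passed swapped below, possibly to compensate for frame rotation.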
const rect = Skia.XYWHRect(
face.bounds.x,
face.bounds.y,
face.bounds.height,
face.bounds.width,
)
faceContainerPath.addOval(rect)
faceContainerPath.trim(0, scanProgressWorkletSharedValue.value, false)
frame.save()
if (!faceContainerPaints.value[idx]) {
faceContainerPaints.value[idx] = Skia.Paint()
faceContainerPaints.value[idx].setStyle(PaintStyle.Stroke)
faceContainerPaints.value[idx].setStrokeWidth(4)
faceContainerPaints.value[idx].setColor(Skia.Color('red'))
}
const isPitchAngleValid = face.pitchAngle > -5 && face.pitchAngle < 5
const isRollAngleValid = face.rollAngle > -5 && face.rollAngle < 5
const isYawAngleValid = face.yawAngle > -5 && face.yawAngle < 5
if (isPitchAngleValid && isRollAngleValid && isYawAngleValid) {
faceContainerPaints.value[idx].setColor(Skia.Color('green'))
} else {
faceContainerPaints.value[idx].setColor(Skia.Color('red'))
}
frame.drawPath(faceContainerPath, faceContainerPaints.value[idx])
frame.restore()
if (isPitchAngleValid && isRollAngleValid && isYawAngleValid) {
runAtTargetFps(1, () => {
'worklet'
try {
const resized = resize(frame, {
scale: {
width: CROP_SIZE,
height: CROP_SIZE,
},
crop: {
x: face.bounds.x,
y: face.bounds.y,
width: face.bounds.width,
height: face.bounds.height,
},
pixelFormat: PIXEL_FORMAT,
dataType: DATA_TYPE,
rotation: '90deg',
})
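// react-native-fast-opencv keeps every created object alive until
// clearBuffers() is called, so clear leftovers before allocating new Mats.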
OpenCV.clearBuffers()
// Create a Mat from the resized buffer.
// The resized BGRA buffer has length CROP_SIZE * CROP_SIZE * 4.
// We pass a new Uint8Array view over the resized buffer.
const mat = OpenCV.frameBufferToMat(
CROP_SIZE,
CROP_SIZE,
PIXELS_PER_ROW,
new Uint8Array(resized.buffer),
)
// Create a destination Mat of the correct dimensions.
const dst = OpenCV.createObject(
ObjectType.Mat,
CROP_SIZE,
CROP_SIZE,
OPENCV_DATA_TYPE,
)
// Since our resized image is already CROP_SIZExCROP_SIZE,
// the ROI for crop is the entire image (starting at 0,0).
const roi = OpenCV.createObject(ObjectType.Rect, 0, 0, CROP_SIZE, CROP_SIZE)
// Crop: mat -> dst using ROI.
OpenCV.invoke('crop', mat, dst, roi)
// const rgbMat = OpenCV.createObject(
// ObjectType.Mat,
// CROP_SIZE,
// CROP_SIZE,
// OPENCV_DATA_TYPE,
// )
// OpenCV.invoke('cvtColor', mat, rgbMat, ColorConversionCodes.COLOR_BGR2RGB)
//
// const resBuff = OpenCV.matToBuffer(rgbMat, 'uint8')
// onFaceResized(resBuff.buffer)
// const result = OpenCV.toJSValue(rgbMat)
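// Convert the 4-channel BGRA crop to a single-channel grayscale Mat.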
const grayscaleMat = OpenCV.createObject(
ObjectType.Mat,
CROP_SIZE,
CROP_SIZE,
OPENCV_DATA_TYPE,
)
OpenCV.invoke('cvtColor', mat, grayscaleMat, COLOR_CONVERSION_CODES)
const resBuff = OpenCV.matToBuffer(grayscaleMat, DATA_TYPE)
onFaceResized(new Uint8Array(resBuff.buffer))
try {
const result = OpenCV.toJSValue(grayscaleMat, 'png')
updatePreviewImage(result.base64)
} catch (error) {
/* empty */
}
OpenCV.clearBuffers()
} catch (error) {
console.error(error)
}
})
}
}
})
},
[detectFaces, faceContainerPaints.value, scanProgressWorkletSharedValue.value],
)
const androidFrameProcessor = useFrameProcessor(frame => {
'worklet'
const _faces = detectFaces(frame)
_faces.forEach(face => {
if (face.bounds) {
const delta = 5
const isPitchAngleValid = face.pitchAngle > -delta && face.pitchAngle < delta
const isRollAngleValid = face.rollAngle > -delta && face.rollAngle < delta
const isYawAngleValid = face.yawAngle > -delta && face.yawAngle < delta
if (isPitchAngleValid && isRollAngleValid && isYawAngleValid) {
runAtTargetFps(1, () => {
'worklet'
try {
const resized = resize(frame, {
scale: {
width: CROP_SIZE,
height: CROP_SIZE,
},
crop: {
/* flip x/y and width/height because the Android camera frame is rotated */
x: face.bounds.y,
y: face.bounds.x,
width: face.bounds.height,
height: face.bounds.width,
},
pixelFormat: PIXEL_FORMAT,
dataType: DATA_TYPE,
rotation: '270deg',
})
OpenCV.clearBuffers()
const mat = OpenCV.frameBufferToMat(
CROP_SIZE,
CROP_SIZE,
PIXELS_PER_ROW,
new Uint8Array(resized.buffer),
)
const dst = OpenCV.createObject(
ObjectType.Mat,
CROP_SIZE,
CROP_SIZE,
OPENCV_DATA_TYPE,
)
const roi = OpenCV.createObject(ObjectType.Rect, 0, 0, CROP_SIZE, CROP_SIZE)
OpenCV.invoke('crop', mat, dst, roi)
const grayscaleMat = OpenCV.createObject(
ObjectType.Mat,
CROP_SIZE,
CROP_SIZE,
OPENCV_DATA_TYPE,
)
OpenCV.invoke('cvtColor', mat, grayscaleMat, COLOR_CONVERSION_CODES)
const resBuff = OpenCV.matToBuffer(grayscaleMat, DATA_TYPE)
onFaceResized(new Uint8Array(resBuff.buffer))
try {
const result = OpenCV.toJSValue(grayscaleMat, 'png')
updatePreviewImage(result.base64)
} catch (error) {
/* empty */
}
OpenCV.clearBuffers()
} catch (error) {
console.error(error)
}
})
}
}
})
}, [])
const isActive = useMemo(() => {
return isFocused && currentAppState === 'active'
}, [currentAppState, isFocused])
useEffect(() => {
if (hasPermission) return
requestPermission()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
return (
<View className='relative'>
{hasPermission ? (
<>
<>
{device && (
<View
className='relative mx-auto aspect-square overflow-hidden rounded-full border-[8px] border-solid border-primaryMain'
style={{
marginTop: insets.top,
}}
>
<ReanimatedCamera
ref={camera}
enableFpsGraph={true}
style={{
width: '100%',
height: '100%',
}}
device={device}
isActive={isActive}
frameProcessor={
isActive
? initializationDelay
? Platform.OS === 'android'
? androidFrameProcessor
: frameProcessor
: undefined
: undefined
}
// animatedProps={zoomAnimatedProps}
/>
</View>
)}
</>
</>
) : (
<>
<View>
<Text className='text-textPrimary typography-h4'>Requesting Camera Permission</Text>
<UiButton onPress={requestPermission} title='Request Permission' />
</View>
</>
)}
{previewImage && (
<UiImage
className='absolute bottom-0 right-10'
source={previewImage}
style={{
width: 112,
height: 112,
}}
/>
)}
</View>
)
} "vision-camera-resize-plugin": "^3.2.0" So the things is: I've found out, that if i delay initialization of frame processor (for example set frameProcessor variable to camera after 1-2 seconds), it works more stable. I've got continuous app crashing in first 1 second opening component with camera view before that. Furthermore, i've tryed to run tflite models in P.S. I was not able to run AsyncFunction("execTFLite") { (modelSrc: String, inputs: [String]) in
guard let modelSrcURL = URL(string: modelSrc.replacingOccurrences(of: "file://", with: "")) else {
throw NSError(domain: "Invalid URL", code: 0, userInfo: nil)
}
do {
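// NOTE: a new Interpreter is created (and the model re-loaded) on every call;
// caching it between calls would reduce allocations and memory churn.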
let interpreter = try Interpreter(modelPath: modelSrcURL.path)
try interpreter.allocateTensors()
var inputsData = Data()
for input in inputs {
// convert to float and append to inputsData
let inputFloat = Float(input) ?? 0.0
// inputsData.append(Data(bytes: [inputFloat.bitPattern], count: MemoryLayout<Float>.size))
inputsData.append(contentsOf: withUnsafeBytes(of: inputFloat) { Data($0) })
}
try interpreter.copy(inputsData, toInputAt: 0)
try interpreter.invoke()
let outputTensor = try interpreter.output(at: 0)
let outputData = outputTensor.data
return outputData
} catch let error {
print("Error: \(error)")
throw error
}
}
```

And it works fine, with an additional 200-300 MB of memory, but overall the whole flow runs for no more than a couple of minutes at best (even without the TFLite model). The app continuously crashes with the message "The app “myApp” has been killed by the operating system because it is using too much memory." At first I thought that maybe I was trying to do too much at 30 fps, but according to @mrousavy's VisionCamera docs it should be very performant and capable of much more than that.
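For reference, the delay workaround reduced to a minimal sketch (the component name and the 1.5 s timing are illustrative, not from the original code):

```tsx
import { useEffect, useState } from 'react'
import { Camera, useCameraDevice, useFrameProcessor } from 'react-native-vision-camera'

// Minimal sketch of the delayed-attachment workaround described above.
export function DelayedProcessorCamera() {
  const device = useCameraDevice('front')
  const [processorReady, setProcessorReady] = useState(false)

  // Give the camera session time to initialize before attaching the
  // frame processor; attaching it immediately triggered the crash.
  useEffect(() => {
    const timeout = setTimeout(() => setProcessorReady(true), 1500)
    return () => clearTimeout(timeout)
  }, [])

  const frameProcessor = useFrameProcessor(frame => {
    'worklet'
    // per-frame work (e.g. face detection) goes here
  }, [])

  if (device == null) return null
  return (
    <Camera
      style={{ flex: 1 }}
      device={device}
      isActive={true}
      frameProcessor={processorReady ? frameProcessor : undefined}
    />
  )
}
```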
---
I believe this issue might be related to this discussion facebook/hermes#1518.
I'm working on an application that uses the react-native-vision-camera library (https://github.com/mrousavy/react-native-vision-camera) along with the useSkiaFrameProcessor hook to render drawings on camera frames. I'm encountering random crashes exclusively on iOS. The issue is difficult to reproduce in another project, but it consistently occurs in this one, possibly due to the larger project size. The app works fine with useFrameProcessor, but we need the ability to draw on the frames. I tried using Skia Pictures to draw on the canvas, to check whether the issue was related to the useSkiaFrameProcessor hook itself, but I'm still experiencing crashes. Any advice would be greatly appreciated!
Error log below.
Here's some code:
Project Dependencies
[email protected]
[email protected]
@shopify/[email protected]
[email protected]