What's happening?
Hi, I'm working on a feature where I need to detect an object through the live camera and, based on the coordinates received from the model, crop the image and save it.
I cannot figure out how to crop the image — it doesn't work. Could someone explain it to me?
The main issue is that my crop doesn't seem to be correct.
Help please!
"react-native-vision-camera": "^4.7.2",
"react-native-fast-tflite": "^2.0.0",
"@react-native-community/image-editor": "^4.3.0",
"react-native": "0.81.1",
Loading model
// Load the TFLite object-detection model. `model` stays undefined until the
// async load finishes, so every consumer below must null-check it first.
`const objectDetection = useTensorflowModel(require('../../modal/mobile-object.tflite'));
const model = objectDetection.state === 'loaded' ? objectDetection.model : undefined;
// Resize plugin used inside the frame-processor worklet to scale frames
// down to the model's input size.
const { resize } = useResizePlugin();
// Per-frame worklet: runs the TFLite detector on a downscaled frame and
// publishes the best bounding box (normalized [0,1] frame coordinates).
const frameProcessor = useFrameProcessor(
frame => {
'worklet';
if (!model) return;

// Resize the camera frame to the model's expected input (192x192 RGB uint8).
const resized = resize(frame, {
scale: { width: 192, height: 192 },
pixelFormat: 'rgb',
dataType: 'uint8',
});

// SSD-style outputs: [boxes, classes, scores, numDetections].
// NOTE(review): index 1 (classes) is unused — confirm this output order
// matches your .tflite model's signature.
const result = model.runSync([resized]);
const boxes = result?.[0];
const scores = result?.[2];
const num = result?.[3];
if (!boxes || !scores || !num) return;

// The count arrives either as a 1-element (typed) array or a plain number;
// both array cases are covered by the `length` check.
const numDetections =
typeof num === 'object' && num.length !== undefined ? Number(num[0]) : Number(num);

// Pick the highest-scoring detection.
let bestIndex = -1;
let bestScore = 0;
for (let i = 0; i < numDetections; i++) {
const score = Number(scores[i]);
if (score > bestScore) {
bestScore = score;
bestIndex = i;
}
}
if (bestIndex === -1 || bestScore < 0.8) return;

// Boxes are [yMin, xMin, yMax, xMax], normalized to [0,1] relative to the
// camera FRAME — not the screen preview and not the captured photo.
const boxOffset = bestIndex * 4;
const yMin = Number(boxes[boxOffset]);
const xMin = Number(boxes[boxOffset + 1]);
const yMax = Number(boxes[boxOffset + 2]);
const xMax = Number(boxes[boxOffset + 3]);

// BUG FIX: Number(undefined) is NaN, never undefined, so the previous
// `=== undefined` guard could not fire. Validate with Number.isFinite.
if (![xMin, yMin, xMax, yMax].every(Number.isFinite)) return;

// Reuse the validated numeric locals instead of re-reading the raw tensor.
updateCorners({ xMin, yMin, xMax, yMax });
},
[model],
);
// Take a full-resolution photo and crop it to the last detection box,
// then navigate to the edit screen with the cropped image URI.
const takeCroppedPhoto = async () => {
if (!detection) return;
const photo = await camera.current?.takePhoto({ flash: onFlashOn ? 'on' : 'off' });
if (!photo) return;

// Clamp normalized coords to [0,1] — detectors can emit values slightly
// outside the unit square, which makes the native crop call fail.
const clamp01 = v => Math.min(Math.max(v, 0), 1);
const xMin = clamp01(detection.xMin);
const yMin = clamp01(detection.yMin);
const xMax = clamp01(detection.xMax);
const yMax = clamp01(detection.yMax);
// A zero- or negative-size rect would crash the native cropper.
if (xMax <= xMin || yMax <= yMin) return;

// NOTE(review): these normalized coords come from the camera FRAME, while
// the crop is applied to the captured PHOTO. On Android the photo is often
// delivered landscape (width/height swapped relative to a portrait frame) —
// if crops come out rotated/shifted, transpose the axes here. TODO confirm
// photo.orientation against the frame orientation on your test devices.
const cropData = {
offset: {
// Round to whole pixels — ImageEditor expects an integer rect.
x: Math.round(xMin * photo.width),
y: Math.round(yMin * photo.height),
},
size: {
width: Math.round((xMax - xMin) * photo.width),
height: Math.round((yMax - yMin) * photo.height),
},
};

try {
const croppedImage = await ImageEditor.cropImage(`file://${photo.path}`, cropData);
if (croppedImage) {
navigation.navigate('CameraEditScreen', {
imagePreview: croppedImage.uri,
});
}
} catch (err) {
// BUG FIX: cropImage rejections were previously unhandled; surface them.
console.warn('cropImage failed', err);
}
};
// Render: full-screen camera preview with a Skia overlay drawing the best
// detection box, plus model-status text and a capture button.
return (
<View style={{ flex: 1 }}>
<View
style={{ flex: 1 }}
onLayout={e => {
const { width, height } = e.nativeEvent.layout;
setPreviewSize({ width, height });
}}>
<Camera
ref={camera}
style={StyleSheet.absoluteFill}
device={device}
isActive={true}
photo={true}
resizeMode="cover"
frameProcessor={frameProcessor}
/>
{/* NOTE(review): with resizeMode="cover" the preview crops/scales the
camera frame, so multiplying frame-normalized coords by the raw view
size skews the box whenever the view and frame aspect ratios differ —
TODO confirm and apply an aspect-fill correction if boxes look offset. */}
<Canvas style={StyleSheet.absoluteFill}>
{detection && (
<Rect
x={detection.xMin * previewSize.width}
y={detection.yMin * previewSize.height}
width={(detection.xMax - detection.xMin) * previewSize.width}
height={(detection.yMax - detection.yMin) * previewSize.height}
color="red"
style="stroke"
strokeWidth={4}
/>
)}
</Canvas>
</View>
<View style={styles.modelStatus}>
<Text>Model status: {objectDetection.state}</Text>
</View>
<TouchableOpacity style={styles.button} onPress={() => takeCroppedPhoto()}>
<Text style={{ color: 'black' }}>Take Photo</Text>
</TouchableOpacity>
</View>
);
Reproducible Code
// Load the TFLite object-detection model. `model` stays undefined until the
// async load finishes, so every consumer below must null-check it first.
`const objectDetection = useTensorflowModel(require('../../modal/mobile-object.tflite'));
const model = objectDetection.state === 'loaded' ? objectDetection.model : undefined;
// Resize plugin used inside the frame-processor worklet to scale frames
// down to the model's input size.
const { resize } = useResizePlugin();
// Per-frame worklet: runs the TFLite detector on a downscaled frame and
// publishes the best bounding box (normalized [0,1] frame coordinates).
const frameProcessor = useFrameProcessor(
frame => {
'worklet';
if (!model) return;

// Resize the camera frame to the model's expected input (192x192 RGB uint8).
const resized = resize(frame, {
scale: { width: 192, height: 192 },
pixelFormat: 'rgb',
dataType: 'uint8',
});

// SSD-style outputs: [boxes, classes, scores, numDetections].
// NOTE(review): index 1 (classes) is unused — confirm this output order
// matches your .tflite model's signature.
const result = model.runSync([resized]);
const boxes = result?.[0];
const scores = result?.[2];
const num = result?.[3];
if (!boxes || !scores || !num) return;

// The count arrives either as a 1-element (typed) array or a plain number;
// both array cases are covered by the `length` check.
const numDetections =
typeof num === 'object' && num.length !== undefined ? Number(num[0]) : Number(num);

// Pick the highest-scoring detection.
let bestIndex = -1;
let bestScore = 0;
for (let i = 0; i < numDetections; i++) {
const score = Number(scores[i]);
if (score > bestScore) {
bestScore = score;
bestIndex = i;
}
}
if (bestIndex === -1 || bestScore < 0.8) return;

// Boxes are [yMin, xMin, yMax, xMax], normalized to [0,1] relative to the
// camera FRAME — not the screen preview and not the captured photo.
const boxOffset = bestIndex * 4;
const yMin = Number(boxes[boxOffset]);
const xMin = Number(boxes[boxOffset + 1]);
const yMax = Number(boxes[boxOffset + 2]);
const xMax = Number(boxes[boxOffset + 3]);

// BUG FIX: Number(undefined) is NaN, never undefined, so the previous
// `=== undefined` guard could not fire. Validate with Number.isFinite.
if (![xMin, yMin, xMax, yMax].every(Number.isFinite)) return;

// Reuse the validated numeric locals instead of re-reading the raw tensor.
updateCorners({ xMin, yMin, xMax, yMax });
},
[model],
);
// Take a full-resolution photo and crop it to the last detection box,
// then navigate to the edit screen with the cropped image URI.
const takeCroppedPhoto = async () => {
if (!detection) return;
const photo = await camera.current?.takePhoto({ flash: onFlashOn ? 'on' : 'off' });
if (!photo) return;

// Clamp normalized coords to [0,1] — detectors can emit values slightly
// outside the unit square, which makes the native crop call fail.
const clamp01 = v => Math.min(Math.max(v, 0), 1);
const xMin = clamp01(detection.xMin);
const yMin = clamp01(detection.yMin);
const xMax = clamp01(detection.xMax);
const yMax = clamp01(detection.yMax);
// A zero- or negative-size rect would crash the native cropper.
if (xMax <= xMin || yMax <= yMin) return;

// NOTE(review): these normalized coords come from the camera FRAME, while
// the crop is applied to the captured PHOTO. On Android the photo is often
// delivered landscape (width/height swapped relative to a portrait frame) —
// if crops come out rotated/shifted, transpose the axes here. TODO confirm
// photo.orientation against the frame orientation on your test devices.
const cropData = {
offset: {
// Round to whole pixels — ImageEditor expects an integer rect.
x: Math.round(xMin * photo.width),
y: Math.round(yMin * photo.height),
},
size: {
width: Math.round((xMax - xMin) * photo.width),
height: Math.round((yMax - yMin) * photo.height),
},
};

try {
const croppedImage = await ImageEditor.cropImage(`file://${photo.path}`, cropData);
if (croppedImage) {
navigation.navigate('CameraEditScreen', {
imagePreview: croppedImage.uri,
});
}
} catch (err) {
// BUG FIX: cropImage rejections were previously unhandled; surface them.
console.warn('cropImage failed', err);
}
};
// Render: full-screen camera preview with a Skia overlay drawing the best
// detection box, plus model-status text and a capture button.
return (
<View style={{ flex: 1 }}>
<View
style={{ flex: 1 }}
onLayout={e => {
const { width, height } = e.nativeEvent.layout;
setPreviewSize({ width, height });
}}>
<Camera
ref={camera}
style={StyleSheet.absoluteFill}
device={device}
isActive={true}
photo={true}
resizeMode="cover"
frameProcessor={frameProcessor}
/>
{/* NOTE(review): with resizeMode="cover" the preview crops/scales the
camera frame, so multiplying frame-normalized coords by the raw view
size skews the box whenever the view and frame aspect ratios differ —
TODO confirm and apply an aspect-fill correction if boxes look offset. */}
<Canvas style={StyleSheet.absoluteFill}>
{detection && (
<Rect
x={detection.xMin * previewSize.width}
y={detection.yMin * previewSize.height}
width={(detection.xMax - detection.xMin) * previewSize.width}
height={(detection.yMax - detection.yMin) * previewSize.height}
color="red"
style="stroke"
strokeWidth={4}
/>
)}
</Canvas>
</View>
<View style={styles.modelStatus}>
<Text>Model status: {objectDetection.state}</Text>
</View>
<TouchableOpacity style={styles.button} onPress={() => takeCroppedPhoto()}>
<Text style={{ color: 'black' }}>Take Photo</Text>
</TouchableOpacity>
</View>
);
Relevant log output
Camera Device
Device
IOS and Android
VisionCamera Version
4.7.2
Can you reproduce this issue in the VisionCamera Example app?
I didn't try (⚠️ your issue might get ignored & closed if you don't try this)
Additional information
What's happening?
Hi, I'm working on a feature where I need to detect an object through the live camera and, based on the coordinates received from the model, crop the image and save it.
I cannot figure out how to crop the image — it doesn't work. Could someone explain it to me?
The main issue is that my crop doesn't seem to be correct.
Help please!
Loading model
Reproducible Code
Relevant log output
Camera Device
none
IOS and Android
VisionCamera Version
4.7.2
Can you reproduce this issue in the VisionCamera Example app?
I didn't try (⚠️ your issue might get ignored & closed if you don't try this)
Additional information