Face detection not working in Android Studio - python-3.x

I'm new to Android Studio and want to do face detection. The face detector code uses OpenCV and is written in Python; I'm using Chaquopy as the Python SDK. When I run the app, no face is detected, and no error is shown either. Can someone help me out? Below is my MainActivity.java code:
public class MainActivity extends AppCompatActivity {
    Button btn;
    ImageView iv;
    // BitmapDrawable and Bitmap are used to pull the image out of the ImageView
    BitmapDrawable drawable;
    Bitmap bitmap;
    String imageString = "";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        btn = (Button) findViewById(R.id.submit);
        iv = (ImageView) findViewById(R.id.image_view);

        if (!Python.isStarted())
            Python.start(new AndroidPlatform(this));
        final Python py = Python.getInstance();

        btn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // on click, grab the image currently shown in the ImageView
                drawable = (BitmapDrawable) iv.getDrawable();
                bitmap = drawable.getBitmap();
                imageString = getStringImage(bitmap);
                // imageString now holds the Base64-encoded image; pass it to the Python script
                PyObject pyo = py.getModule("myscript");
                // call the main function in the Python code, passing the image string as parameter
                PyObject obj = pyo.callAttr("main", imageString);
                // obj returns the processed image as a Base64 string
                String str = obj.toString();
                // decode it back to a byte array
                byte[] data = android.util.Base64.decode(str, android.util.Base64.DEFAULT);
                // convert the bytes to a bitmap
                Bitmap bmp = BitmapFactory.decodeByteArray(data, 0, data.length);
                // show the result in the ImageView
                iv.setImageBitmap(bmp);
            }
        });
    }

    // convert the bitmap into a byte array and finally into a Base64 string
    private String getStringImage(Bitmap bitmap) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        bitmap.compress(Bitmap.CompressFormat.PNG, 100, baos);
        // store in a byte array
        byte[] imageBytes = baos.toByteArray();
        // finally encode to a string
        String encodedImage = android.util.Base64.encodeToString(imageBytes, android.util.Base64.DEFAULT);
        return encodedImage;
    }
}
and below is my Python script, "myscript.py":
import numpy as np
import cv2
import io
from PIL import Image
import base64
import face_recognition

def main(data):
    decoded_data = base64.b64decode(data)
    # wrap the raw bytes in a uint8 array
    np_data = np.frombuffer(decoded_data, np.uint8)
    img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_locations = face_recognition.face_locations(img_gray)
    for (top, right, bottom, left) in face_locations:
        cv2.rectangle(img_rgb, (left, top), (right, bottom), (0, 0, 255), 8)
    # convert the annotated image to PIL
    pil_im = Image.fromarray(img_rgb)
    # convert the image to bytes in an in-memory PNG
    buff = io.BytesIO()
    pil_im.save(buff, format="PNG")
    # convert to Base64 and return as a string
    img_str = base64.b64encode(buff.getvalue())
    return str(img_str, 'utf-8')
My minSdkVersion is 16 and targetSdkVersion is 30.
I don't understand what the problem with this code is. Can someone help me?
Thanks in advance.
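Not part of the original post, but one hedged suggestion for debugging: Chaquopy surfaces Python exceptions to Java as com.chaquo.python.PyException, so wrapping the call in a try/catch and logging makes it obvious whether myscript.py is failing before it ever reaches the face detector. A minimal sketch of the click-handler body, reusing the names from the code above:
try {
    PyObject pyo = py.getModule("myscript");
    PyObject obj = pyo.callAttr("main", imageString);
    String str = obj.toString();
    // log the length so an empty or truncated result is easy to spot
    android.util.Log.d("FaceDetect", "returned string length: " + str.length());
    byte[] data = android.util.Base64.decode(str, android.util.Base64.DEFAULT);
    iv.setImageBitmap(BitmapFactory.decodeByteArray(data, 0, data.length));
} catch (com.chaquo.python.PyException e) {
    // any Python-side failure (bad import, missing function, runtime error) lands here
    android.util.Log.e("FaceDetect", "Python error", e);
}
If the catch block fires, the Logcat traceback usually names the failing line in the Python script; if it never fires and the returned string has a plausible length, the problem is more likely in the detection step itself.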

Related

Flutter/Mediapipe: how to save a Flutter Canvas/CustomPainter with mediapipe pose outline to an image file?

We are using the code shown below from the package package:google_mlkit_pose_detection/google_mlkit_pose_detection.dart (https://pub.dev/packages/google_mlkit_pose_detection) to save the phone camera image, together with the MediaPipe pose outline, to an image file on the phone. The library shows key points of the human pose, such as shoulders and arms, with circles and lines using Canvas and CustomPainter. We can see these on the phone screen, but we want to save the image and the human pose points to a file on the phone; in other words, we want to save the painting into an image on the phone.
A similar question was asked here: Flutter: How would one save a Canvas/CustomPainter to an image file? It does not have the MediaPipe component; we tried that solution, but it didn't work for us.
The main problem in the code example shown below is that in the line picture = recorder.endRecording(); picture is always null.
import 'dart:math';
import 'dart:typed_data';
import 'dart:ui';
import 'dart:ui' as ui;
import 'package:ace_example/painters/keypoints.dart';
import 'package:flutter/material.dart';
import 'package:google_mlkit_pose_detection/google_mlkit_pose_detection.dart';
import 'coordinates_translator.dart';

class PosePainter extends CustomPainter {
  PosePainter(this.poses, this.absoluteImageSize, this.rotation);

  final List<Pose> poses;
  final Size absoluteImageSize;
  final InputImageRotation rotation;
  Picture? picture;

  final paintMid = Paint()
    ..style = PaintingStyle.stroke
    ..strokeWidth = 2.0
    ..color = const Color.fromRGBO(224, 224, 224, 1);
  final paintLeft = Paint()
    ..style = PaintingStyle.fill
    ..strokeWidth = 2.0
    ..color = const Color.fromRGBO(255, 138, 0, 1);
  final paintRight = Paint()
    ..style = PaintingStyle.fill
    ..strokeWidth = 2.0
    ..color = const Color.fromRGBO(0, 217, 231, 1);

  final cycleRadius = 2.0;
  final cycleBorderRadius = max(2.0 + 1, 2.0 * 1.2);
  final lineWidth = 6.0;
  final posePoints = DownwardDogPoints();

  @override
  void paint(Canvas canvas, Size size) {
    final recorder = ui.PictureRecorder();
    final canvas = Canvas(
        recorder,
        Rect.fromPoints(
            const Offset(0.0, 0.0), const Offset(0.0, 0.0)));
    for (final pose in poses) {
      pose.landmarks.forEach((poseType, landmark) {
        // draw circle (white)
        canvas.drawCircle(
            Offset(
              translateX(landmark.x, rotation, size, absoluteImageSize),
              translateY(landmark.y, rotation, size, absoluteImageSize),
            ),
            cycleBorderRadius,
            paintMid);
      });
    }
    picture = recorder.endRecording();
  }

  Future<Image> generateImage() async {
    // ui.Image img = (await picture.toImage(384, 683)) as Image;
    // return img;
    if (picture != null) {
      ui.Image img = await picture!.toImage(384, 683);
      // final pngBytes = await img.toByteData(format: ui.EncodingFormat.png());
      ByteData pngBytes = (await img.toByteData(format: ImageByteFormat.png))!;
      Uint8List imgList = Uint8List.view(pngBytes.buffer);
      return Image.memory(imgList);
    } else {
      print("Picture is empty");
    }
    Uint8List imgList = Uint8List.fromList([0, 0, 0, 0]);
    return Image.memory(imgList);
  }

  @override
  bool shouldRepaint(covariant PosePainter oldDelegate) {
    return oldDelegate.absoluteImageSize != absoluteImageSize ||
        oldDelegate.poses != poses;
  }
}

Transcode SVG to JPEG

My picture in SVG format includes the latest data point, 122.6, but in the JPEG version the last data point is 122.1. How can I convert the image from SVG format to JPEG format without losing the latest data?
Is it the renderer? Padding or margins? The size? What is wrong in my code?
Best, ghost
package helix;

import java.io.*;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.batik.transcoder.image.JPEGTranscoder;
import org.apache.fop.svg.PDFTranscoder;
import org.apache.batik.transcoder.TranscoderInput;
import org.apache.batik.transcoder.TranscoderOutput;
import org.apache.batik.ext.awt.image.codec.*;

public class genesisMATPIsvg2jpg {
    public static void main(String[] args) throws Exception {
        // Step 1: read the input SVG document into a TranscoderInput
        String svg_URI_input = new File(basePath + "/var/gWebClient/Materialkostenindex2015=100.svg").toURL().toString();
        TranscoderInput input_svg_image = new TranscoderInput(svg_URI_input);
        // Step 2: define an OutputStream for the JPEG file and attach it to a TranscoderOutput
        OutputStream jpg_ostream = new FileOutputStream(basePath + "/var/gWebClient/Materialkostenindex2015=100.jpeg");
        TranscoderOutput output_jpg_image = new TranscoderOutput(jpg_ostream);
        // Step 3: create the JPEGTranscoder and set transcoding hints (output width, height, quality)
        JPEGTranscoder my_converter = new JPEGTranscoder();
        Object jpegWidth = new Float(800);
        Object jpegHeight = new Float(600);
        my_converter.addTranscodingHint(JPEGTranscoder.KEY_WIDTH, jpegWidth);
        my_converter.addTranscodingHint(JPEGTranscoder.KEY_HEIGHT, jpegHeight);
        my_converter.addTranscodingHint(JPEGTranscoder.KEY_QUALITY, new Float(0.9));
        // Step 4: write the output
        my_converter.transcode(input_svg_image, output_jpg_image);
        // Step 5: flush and close the output stream
        jpg_ostream.flush();
        jpg_ostream.close();
    }
}
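No answer is recorded for this question, but one variable worth isolating (a hedged suggestion, not from the original post) is the forced output size: transcode once at the SVG's intrinsic size, without KEY_WIDTH/KEY_HEIGHT, and with an explicit white background, since JPEG has no alpha channel. If the last data point survives at the intrinsic size, the loss comes from the scaling step rather than from the SVG itself. A minimal sketch, with the path prefix from the question omitted:
import java.io.FileOutputStream;
import java.io.OutputStream;
import org.apache.batik.transcoder.TranscoderInput;
import org.apache.batik.transcoder.TranscoderOutput;
import org.apache.batik.transcoder.image.ImageTranscoder;
import org.apache.batik.transcoder.image.JPEGTranscoder;

public class Svg2JpegDiagnostic {
    public static void main(String[] args) throws Exception {
        // transcode at the SVG's intrinsic size: no KEY_WIDTH/KEY_HEIGHT hints
        TranscoderInput input = new TranscoderInput(
                new java.io.File("Materialkostenindex2015=100.svg").toURI().toString());
        try (OutputStream out = new FileOutputStream("Materialkostenindex2015=100.jpeg")) {
            TranscoderOutput output = new TranscoderOutput(out);
            JPEGTranscoder transcoder = new JPEGTranscoder();
            // JPEG has no alpha channel, so pin the background explicitly
            transcoder.addTranscodingHint(ImageTranscoder.KEY_BACKGROUND_COLOR, java.awt.Color.WHITE);
            transcoder.addTranscodingHint(JPEGTranscoder.KEY_QUALITY, Float.valueOf(0.95f));
            transcoder.transcode(input, output);
        }
    }
}
If the intrinsic-size output is correct, reintroduce KEY_WIDTH and KEY_HEIGHT one at a time to see which hint changes the result.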

Apache POI, converting powerpoint slides to images, images are low quality

Here is my code
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.poi.hslf.usermodel.HSLFSlide;
import org.apache.poi.hslf.usermodel.HSLFSlideShow;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.imageio.IIOImage;
import javax.imageio.ImageIO;
import javax.imageio.ImageWriteParam;
import javax.imageio.ImageWriter;
import javax.imageio.stream.FileImageOutputStream;
import java.awt.*;
import java.awt.geom.Rectangle2D;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;

@Service
@Slf4j
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
public class FileConverterService {

    public void convertPptToImages() throws Exception {
        ClassLoader classLoader = getClass().getClassLoader();
        File file = new File(classLoader.getResource("Sylon_GuidedPath_Sprint22Deck.ppt").getFile());
        Document pdfDocument = new Document();
        // PdfWriter pdfWriter = PdfWriter.getInstance(pdfDocument, new FileOutputStream(""));
        FileInputStream is = new FileInputStream(file);
        HSLFSlideShow ppt = new HSLFSlideShow(is);
        is.close();
        Dimension pgsize = ppt.getPageSize();
        pdfDocument.setPageSize(new Rectangle((float) pgsize.getWidth(), (float) pgsize.getHeight()));
        // convert to images
        int idx = 1;
        for (HSLFSlide slide : ppt.getSlides()) {
            BufferedImage img =
                    new BufferedImage(pgsize.width, pgsize.height, BufferedImage.TYPE_INT_RGB);
            Graphics2D graphics = img.createGraphics();
            graphics.setRenderingHint(
                    RenderingHints.KEY_ALPHA_INTERPOLATION, RenderingHints.VALUE_ALPHA_INTERPOLATION_QUALITY);
            graphics.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
            graphics.setRenderingHint(
                    RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON);
            graphics.setRenderingHint(RenderingHints.KEY_RENDERING, RenderingHints.VALUE_RENDER_QUALITY);
            graphics.setRenderingHint(
                    RenderingHints.KEY_INTERPOLATION, RenderingHints.VALUE_INTERPOLATION_BICUBIC);
            graphics.setRenderingHint(
                    RenderingHints.KEY_FRACTIONALMETRICS, RenderingHints.VALUE_FRACTIONALMETRICS_ON);
            // clear the drawing area
            graphics.setPaint(Color.white);
            graphics.fill(new Rectangle2D.Float(0, 0, pgsize.width, pgsize.height));
            // render
            slide.draw(graphics);
            // save the output
            ImageWriter jpgWriter = ImageIO.getImageWritersByFormatName("jpg").next();
            ImageWriteParam jpgWriteParam = jpgWriter.getDefaultWriteParam();
            jpgWriteParam.setCompressionMode(ImageWriteParam.MODE_EXPLICIT);
            jpgWriteParam.setCompressionQuality(1f);
            jpgWriter.setOutput(new FileImageOutputStream(
                    new File("slide-" + idx + ".jpg")));
            IIOImage outputImage = new IIOImage(img, null, null);
            jpgWriter.write(null, outputImage, jpgWriteParam);
            jpgWriter.dispose();
            idx++;
        }
    }
}
I based my code on this documentation: http://poi.apache.org/components/slideshow/how-to-shapes.html#Render
I have tried both jpeg and png, and the image seems to be fairly low resolution and the text is difficult to read compared to the original .ppt. Is there any way to increase the resolution/quality of the images?
What you can try:
- Apply RenderingHints to the graphics. Below is a sample I created comparing the image with and without rendering hints; you can see that the characters look better with the hints.
- Increase the compression quality for the JPEG image.
The following program demonstrates how to generate images with and without rendering hints and write them with 100% compression quality.
import java.awt.Dimension;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.FileInputStream;
import javax.imageio.IIOImage;
import javax.imageio.ImageIO;
import javax.imageio.ImageWriteParam;
import javax.imageio.ImageWriter;
import javax.imageio.plugins.jpeg.JPEGImageWriteParam;
import javax.imageio.stream.FileImageOutputStream;
import org.apache.poi.hslf.usermodel.HSLFSlide;
import org.apache.poi.hslf.usermodel.HSLFSlideShow;

public class ImproveSlideConvertToImageQuality {

    public static void main(String[] args) throws Exception {
        convertPptToImages(true);
        convertPptToImages(false);
    }

    public static void convertPptToImages(boolean withRenderHint) throws Exception {
        File file = new File("test.ppt");
        String suffix = withRenderHint ? "-with-hint" : "-without-hint";
        try (FileInputStream is = new FileInputStream(file); HSLFSlideShow ppt = new HSLFSlideShow(is)) {
            Dimension pgsize = ppt.getPageSize();
            int idx = 1;
            for (HSLFSlide slide : ppt.getSlides()) {
                BufferedImage img = new BufferedImage(pgsize.width, pgsize.height, BufferedImage.TYPE_INT_RGB);
                Graphics2D graphics = img.createGraphics();
                if (withRenderHint) {
                    graphics.setRenderingHint(RenderingHints.KEY_INTERPOLATION,
                            RenderingHints.VALUE_INTERPOLATION_BILINEAR);
                    graphics.setRenderingHint(RenderingHints.KEY_ALPHA_INTERPOLATION,
                            RenderingHints.VALUE_ALPHA_INTERPOLATION_QUALITY);
                    graphics.setRenderingHint(RenderingHints.KEY_ANTIALIASING,
                            RenderingHints.VALUE_ANTIALIAS_ON);
                    graphics.setRenderingHint(RenderingHints.KEY_FRACTIONALMETRICS,
                            RenderingHints.VALUE_FRACTIONALMETRICS_ON);
                    graphics.setRenderingHint(RenderingHints.KEY_RENDERING,
                            RenderingHints.VALUE_RENDER_QUALITY);
                    graphics.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING,
                            RenderingHints.VALUE_TEXT_ANTIALIAS_GASP);
                }
                // render
                slide.draw(graphics);
                final ImageWriter writer = ImageIO.getImageWritersByFormatName("jpg").next();
                writer.setOutput(new FileImageOutputStream(
                        new File("slide-" + idx + suffix + ".jpeg")));
                JPEGImageWriteParam jpegParams = new JPEGImageWriteParam(null);
                jpegParams.setCompressionMode(ImageWriteParam.MODE_EXPLICIT);
                jpegParams.setCompressionQuality(1f);
                // write the file with the given compression level
                // from the JPEGImageWriteParam instance
                IIOImage image = new IIOImage(img, null, null);
                writer.write(null, image, jpegParams);
                writer.dispose();
                idx++;
            }
        }
    }
}
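Rendering hints and compression quality mostly affect sharpness at the same pixel count. If the slides still look low-resolution, a further option (hedged; it is not part of the answer above, but it is similar to what POI's PPTX2PNG example does with its scale option) is to render each slide into a larger BufferedImage and scale the Graphics2D before drawing, so text is rasterized at the higher resolution instead of being enlarged afterwards. A sketch of the loop body with an illustrative scale factor of 2:
double scale = 2.0;  // render at twice the slide's nominal size
int width = (int) Math.ceil(pgsize.getWidth() * scale);
int height = (int) Math.ceil(pgsize.getHeight() * scale);
BufferedImage img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
Graphics2D graphics = img.createGraphics();
// scale the coordinate system so the slide fills the larger image
graphics.scale(scale, scale);
graphics.setPaint(java.awt.Color.white);
graphics.fill(new java.awt.geom.Rectangle2D.Double(0, 0, pgsize.getWidth(), pgsize.getHeight()));
slide.draw(graphics);
graphics.dispose();
The rest of the loop (the ImageWriter setup) stays the same; only the image size and the transform change.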
References:
Controlling Rendering Quality
Setting jpg compression level with ImageIO in Java

How do I integrate a QR code reading application with Sony SmartEyeGlass?

We are developing an application for Sony SmartEyeGlass. We first created it with Android Studio and an Android tablet; now we are working with the Sample Camera Extension sample to integrate it into our project, but there are a lot of details. Can someone help with this?
The Sample Camera extension is a great place to start building your QR code reader. In SampleCameraControl.java there is a function called cameraEventOperation. In this function you will see an example of how to pull the camera data down into a bitmap. Here is the code for reference:
private void cameraEventOperation(CameraEvent event) {
    if ((event.getData() != null) && ((event.getData().length) > 0)) {
        data = event.getData();
        bitmap = BitmapFactory.decodeByteArray(data, 0, data.length);
    }
}
You can take this data and send it to your QR code reader to scan for QR codes. Let me know if this helps!
----- Update ----
You can use a function like this to pass a bitmap to the Google ZXing library. You should put this in something like an AsyncTask:
import com.google.zxing.BarcodeFormat;
import com.google.zxing.BinaryBitmap;
import com.google.zxing.ChecksumException;
import com.google.zxing.DecodeHintType;
import com.google.zxing.FormatException;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.NotFoundException;
import com.google.zxing.RGBLuminanceSource;
import com.google.zxing.Reader;
import com.google.zxing.Result;
import com.google.zxing.common.HybridBinarizer;
import java.util.Hashtable;
import java.util.Vector;

// This function sends the provided bitmap to the Google ZXing decoder
public static String readBarcodeImage(Bitmap bMap) {
    String contents = null;
    int[] intArray = new int[bMap.getWidth() * bMap.getHeight()];
    // copy pixel data from the Bitmap into the intArray array
    bMap.getPixels(intArray, 0, bMap.getWidth(), 0, 0, bMap.getWidth(), bMap.getHeight());
    LuminanceSource source = new RGBLuminanceSource(bMap.getWidth(), bMap.getHeight(), intArray);
    BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
    Reader reader = new MultiFormatReader(); // use this, otherwise ChecksumException
    try {
        Hashtable<DecodeHintType, Object> hints = new Hashtable<DecodeHintType, Object>();
        hints.put(DecodeHintType.TRY_HARDER, Boolean.TRUE);
        Vector<BarcodeFormat> decodeFormats = new Vector<BarcodeFormat>();
        decodeFormats.add(BarcodeFormat.QR_CODE);
        hints.put(DecodeHintType.POSSIBLE_FORMATS, decodeFormats);
        Result result = reader.decode(bitmap, hints);
        BarcodeFormat format = result.getBarcodeFormat();
        contents = result.getText() + " : " + format.toString();
    } catch (NotFoundException e) {
        e.printStackTrace();
    } catch (ChecksumException e) {
        e.printStackTrace();
    } catch (FormatException e) {
        e.printStackTrace();
    }
    return contents;
}
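Since the answer suggests running the decode off the UI thread, here is a minimal usage sketch (hedged, not from the original answer) that feeds the bitmap from cameraEventOperation into readBarcodeImage via an AsyncTask; the class name and the handling of the result string are illustrative only:
// hypothetical wrapper: decode the camera bitmap off the UI thread
private class DecodeQrTask extends android.os.AsyncTask<Bitmap, Void, String> {
    @Override
    protected String doInBackground(Bitmap... bitmaps) {
        // readBarcodeImage is the method shown above
        return readBarcodeImage(bitmaps[0]);
    }

    @Override
    protected void onPostExecute(String contents) {
        if (contents != null) {
            // e.g. show the decoded text in the extension's UI, or just log it
            android.util.Log.d("QrReader", "Decoded: " + contents);
        }
    }
}

// inside cameraEventOperation, after the bitmap has been decoded:
// new DecodeQrTask().execute(bitmap);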

AForge Hough Transform

I'm trying to do an experiment on how to use the HoughTransformation class of AForge. I'm using this class to try to count the number of circles in an image, but I always get this error message: Unsupported pixel format of the source image.
Here is my code:
private void CountCircles(Bitmap sourceImage)
{
    HoughCircleTransformation circleTransform = new HoughCircleTransformation(15);
    circleTransform.ProcessImage(sourceImage);
    Bitmap houghCircleImage = circleTransform.ToBitmap();
    int numCircles = circleTransform.CirclesCount;
    MessageBox.Show("Number of circles found : " + numCircles.ToString());
}
HoughCircleTransformation expects a binary bitmap.
private void CountCircles(Bitmap sourceImage)
{
    // binarize first: grayscale, then threshold
    var filter = new FiltersSequence(new IFilter[]
    {
        Grayscale.CommonAlgorithms.BT709,
        new Threshold(0x40)
    });
    var binaryImage = filter.Apply(sourceImage);

    HoughCircleTransformation circleTransform = new HoughCircleTransformation(15);
    circleTransform.ProcessImage(binaryImage);
    Bitmap houghCircleImage = circleTransform.ToBitmap();

    int numCircles = circleTransform.CirclesCount;
    MessageBox.Show("Number of circles found : " + numCircles.ToString());
}
