How to create a folder in Django to store user inputs - python-3.x

Goal of Code:
User uploads a video to our django site.
Use opencv to split it up into individual frames (this works well).
We store the frames in a unique folder on our backend (this is where the problem lies).
What we need help with:
Creation of a unique folder based on user inputted video with django with correct path.
Save the frames of the video into that folder.
The code thus far:
from django.db import models
from django.urls import reverse
import uuid
import cv2
import os
from django.db import models
from PIL import Image, ImageFilter
from django.urls import reverse
from PIL import Image
class Video(models.Model):
vid = models.FileField(upload_to=image_upload_location(filename='jpg'))
img = models.ImageField(upload_to=image_upload_location(filename='jpg'))
date_added = models.DateTimeField(auto_now_add=True)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
def mp4_to_image(self, *args, **kwargs):
super().save(*args, **kwargs)
This is part we struggling with, creating a unique folder for each video input.
This code works on my local computer path however we need to integrate it with django framework
try:
if not os.exists('exit_folder_path'):
os.makedirs('exit_folder_path')
except OSError:
print ('Error: Creating directory')
Here we use opencv to split the video into individual frames and then save them.
The imwrite() function saves the frame as a jpeg in path defined by name parameter.
We would like these files to be saved to the folder path defined in previous section.
cap = cv2.VideoCapture(self.vid.path)
currentFrame = 0
while (True):
# ret returns a Boolean Value if the frame can be read
ret, frame = cap.read()
# If the frame can't be read then we exit the loop
if ret == False:
break
# Saves image of the current frame in jpg file
name = 'exit_folder_path' + str(currentFrame).zfill(6) + '.jpg'
cv2.imwrite(name, frame)
currentFrame += 1
cap.release()
Problems:
The code saves the user inputted video to a folder called jpg, but no folder is created with the individual frames from the video.
Any and all help is much appreciated.

I guess you need a media folder.
Check here -> https://docs.djangoproject.com/en/3.1/topics/files/

Related

Python how convert images in directory in same sort order

My file is a scanned pdf's not converted to pdf thats why I used pytesseract. I just started python so I dont have any idea about program flow I just did what I think is possible I know there is another way to do this but what I did was:
converted the pdf to single images
scanned these images and get text length
delete image with equal or less than 1 value
Convert again images to single pdf.
My scanned pdf has binder holes I think python read those holes as a single char/text taht why I have a value of 1.
My problem is:
when I convert the images back to pdf it is not sorted.
I want this to run this code into a batch (If I have 100 pdf in a folder this program will delete empty pages). I have no Idea what to study next.
Here's my code:
```
*from PIL import Image
from pytesseract import pytesseract
from pdf2image import convert_from_path
import os
#Define path to tessaract.exe
path_to_tesseract = r'/usr/bin/tesseract'
#create folder
if not os.path.exists('Pages Images'):
os.makedirs('Pages Images')
pages = convert_from_path('t1.pdf')
for i in range(len(pages)):
pages[i].save('Pages Images/page'+ str(i) +'.jpg', 'JPEG')
#Define path to image
path_to_image = r'Pages Images/'
#Point tessaract_cmd to tessaract.exe
pytesseract.tesseract_cmd = path_to_tesseract
#Get file name in directory
for root, dirs, file_names in os.walk(path_to_image):
#iterate over filenames
for file_name in file_names:
#open image
img = Image.open(path_to_image + file_name)
text = pytesseract.image_to_string(img)
#Delete image if less than 1
if len(text) <= 1:
os.remove(path_to_image + file_name)
print(file_name, " has been deleted")
# print(file_name, " ", len(text))
#Convert back Images to pdf
for root, dirs, image_names in os.walk(path_to_image):
image_1 = [
Image.open(path_to_image + image_name)
for image_name in image_names
]
pdf_path = "bd1.pdf"
image_1[0].save(
pdf_path, "PDF" ,resolution=100.0, save_all=True, append_images=image_1[1:]
)
#---------PROBLEMS PDF IS NOT SORTED*

How do I read a request.FILES into DataSource in Geodjango

So, the goal is to create a webpage to load a .shp file into and get a summary of some calculations as a JsonResponse. I have prepared the calculations and everything and it works nicely when I add a manual path to the file in question. However, the goal is for someone else to be able to upload the data and get back the response so I can't hardcode my path.
The overall approach:
Read in a through forms.FileField() and request.FILES['file_name']. After this, I need to transfer this request.FILES object to DataSource in order to read it in. I would rather not upload the file on pc if possible but work directly from the memory.
forms.py
from django import forms
from django.core.files.storage import FileSystemStorage
class UploadFileForm(forms.Form):
# title = forms.CharField(max_length=50)
file = forms.FileField()
views.py
import json
import os
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import loader
from django.contrib import messages
from django.views.generic import TemplateView
from django.http import JsonResponse
from django.conf import settings
from .forms import UploadFileForm
from . import models
from django.shortcuts import redirect
from gisapp.functions.functions import handle_uploaded_file, handle_uploaded_file_two
from django.contrib.gis.gdal import DataSource
from django.core.files.uploadedfile import UploadedFile, TemporaryUploadedFile
import geopandas as gpd
import fiona
def upload_file(request):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
f = request.FILES['file']
# geo2 = gpd.read_file(f)
# print(geo2)
# f_path = os.path.abspath(os.path.join(os.path.dirname(f), f))
# f_path = TemporaryUploadedFile.temporary_file_path(UploadedFile(f))
# print(f_path)
# f_path = f.temporary_file_path()
# new_path = request.FILES['file'].temporary_file_path
# print(f'This is file path: {f_path}')
# print(f'This is file path: {new_path}')
# data = DataSource(f'gisapp/data/{f}') -- given an absolute path it works great
data = DataSource(f) -- constantly failing
# data = DataSource(new_path)
# print(f'This is file path: {f_path}')
layer = data[0]
if layer.geom_type.name == "Polygon" or layer.geom_type.name == "LineString":
handle_uploaded_file(request.FILES['file'])
elif layer.geom_type.name == "Point":
handle_uploaded_file_two(request.FILES['file'])
return JsonResponse({"Count": f"{handle_uploaded_file_two(request.FILES['file'])[0]}", "Bounding Box": f"{handle_uploaded_file_two(request.FILES['file'])[1]}"})
# return JsonResponse({"Count": f"{handle_uploaded_file(request.FILES['file'])[0]}", "Minimum": f"{handle_uploaded_file(request.FILES['file'])[1]}", "Maximum": f"{handle_uploaded_file(request.FILES['file'])[1]}"})
# instance = models.GeometryUpload(file_field=request.FILES['file'])
# instance.save()
# # return HttpResponseRedirect('/success/')
else:
form = UploadFileForm()
return render(request, 'upload.html', {'form': form})
Error I get:
django.contrib.gis.gdal.error.GDALException: Invalid data source input type: <class 'django.core.files.uploadedfile.InMemoryUploadedFile'>
Now as you can see from the upload_file() in views.py, I tried a multitude of operations and when I add an absolute path, it works, but besides that I can't seem to upload the file to DataSource so that I can use it in my later analysis.
Looking at how Django handles this, it doesn't appear possible to work off of an in memory file. The path to the file is passed to the C API for OGR which then handles opening the file and reading it in.
A possible solution that I am trying myself is to have the user zip their shape files (.shp,.shx.,dbf etc.) beforehand. The zip file is then uploaded and unzipped. The shp files can then be read. Hope this helps
I face the same problem and my workaround was to save the file upload by the user in a temporary folder, then pass the absolute path of the temporary file to my DataSource. After finish all my process with the temporary file, I deleted.
The downside of this method is the execution time, is slow.

Using Azure Face Api in Python, How to Return a single faceId or a group of FaceIds if the same person is detected in Video Stream?

I am using Azure Face APi to detect faces in video stream, but for each detected face Azure returns a unique faceId( which is exactly what the documentation says).
The problem is, Let's say Mr.ABC appears in 20 video frames, 20 unique faceIds gets generated. I want something that Azure Face should return me a single faceId or a group of FaceIds generated particularly for Mr.ABC so that I can know that its the same person that stays in front of camera for x amount of time.
I have read the documentation of Azure Facegrouping and Azure FindSimilar, but didn't understand how can I make it work in case of live video stream.
The code I am using for detecting faces using Azure face is given below:
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType
import cv2
import os
import requests
import sys,glob, uuid,re
from PIL import Image, ImageDraw
from urllib.parse import urlparse
from io import BytesIO
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient,__version__
face_key = 'XABC' #API key
face_endpoint = 'https://XENDPOINT.cognitiveservices.azure.com' #endpoint, e.g. 'https://westus.api.cognitive.microsoft.com'
credentials = CognitiveServicesCredentials(face_key)
face_client = FaceClient(face_endpoint, credentials)
camera = cv2.VideoCapture(0)
samplenum =1
im = ""
work_dir = os.getcwd()
person_group_id = 'test02-group'
target_person_group_id = str(uuid.uuid4())
face_ids = []
#cv2 font
font = cv2.FONT_HERSHEY_SIMPLEX
#empty tuple
width = ()
height = ()
left=0
bottom=0
def getRectangle(faceDictionary):
rect = faceDictionary.face_rectangle
left = rect.left
top = rect.top
right = left + rect.width
bottom = top + rect.height
return ((left, top), (right, bottom))
while True:
check,campic = camera.read()
samplenum=samplenum+1
cv2.imwrite("live_pics/"+str(samplenum)+".jpg",campic)
path = work_dir+"/live_pics/"+str(samplenum)+".jpg"
#im = cv2.imread("pics/"+str(samplenum)+".jpg")
stream = open(path, "r+b")
detected_faces = face_client.face.detect_with_stream(
stream,
return_face_id=True,
return_face_attributes=['age','gender','emotion'],recognitionModel="recognition_03")
for face in detected_faces:
width,height = getRectangle(face)
cv2.rectangle(campic,width,height,(0,0,170),2)
face_ids.append(face.face_id)
#cv2.waitKey(100);
if(samplenum>10):
break
cv2.imshow("campic", campic)
if cv2.waitKey(1) == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
There is no magic on Face API: you have to process it with 2 steps for each face found.
What I suggest is to use "Find similar":
at the beginning, create a "FaceList"
then process your video:
Face detect on each frame
For each face found, use find similar operation on the face list created. If there is no match (with a sufficient confidence), add the face to the facelist.
At the end, your facelist will contain all the different people found on the video.
For your realtime use-case, don't use "Identify" operation with PersonGroup / LargePersonGroup (the choice between those 2 depends on the size of the group), because you will be stuck by the need of training on the group. Example, you would be doing the following:
Step 1, 1 time: generate the PersonGroup / LargePersonGroup for this execution
Step 2, N times (for each image where you want to identify the face):
Step 2a: face detect
Step 2b: face "identify" on each detected face based on the PersonGroup / LargePersonGroup
Step 2c: for each unidentified face, add it to the PersonGroup / LargePersonGroup.
Here the issue is the fact that after 2c, you have to train your group again. Even if it is not so long, it cannot be used in real time as it will be too long.
Per my understanding, you want to show a person's name/identity instead of the face ID detected from Face API.
If so, after you get face ids via Face Detect API, you should use the Face Identify API to do this. You can get a person ID if faces could be recognized by Azure Face service, With this ID, you can just use PersonGroup Person API to get this person's information.
I also wrote a simple demo for you, in this demo, there is only 1 image, we can just image it as a video frame. I created a person group with one superman person and added some faces to him.
This is the code below :
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
import asyncio
import io
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
imPath = "<image path>";
ENDPOINT = '<endpoint>'
KEY = '<key>'
PERSON_GROUP_ID = '<person group name>'
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
im = np.array(Image.open(imPath), dtype=np.uint8)
faces = face_client.face.detect_with_stream(open(imPath, 'r+b'),recognition_model='recognition_03');
# Create figure and axes
fig,ax = plt.subplots()
# Display the image
ax.imshow(im)
for i in range(len(faces)):
face = faces[i]
rect =patches.Rectangle((face.face_rectangle.left,face.face_rectangle.top),face.face_rectangle.height,face.face_rectangle.width,linewidth=1,edgecolor='r',facecolor='none')
detected_person = face_client.face.identify([face.face_id],PERSON_GROUP_ID)[0]
if(len(detected_person.candidates) > 0):
person_id = detected_person.candidates[0].person_id
person = face_client.person_group_person.get(PERSON_GROUP_ID,person_id)
plt.text(face.face_rectangle.left,face.face_rectangle.top,person.name,color='r')
else:
plt.text(face.face_rectangle.left,face.face_rectangle.top,'unknown',color='r')
ax.add_patch(rect)
plt.show()
Result:

Using a loop to select multiple images, process and then output to a directory

at the moment, the below code extracts faces from a single image and then outputs those faces to a folder. How can I input a directory full of images and do the same thing? I can't seem to add a directory into the image - face_recognition.load_image_file(C:/directory) as it keeps coming back with permission denied error. Is there a way I can ask it to loop through all the images in a particular directory and then output them to my path?
from PIL import Image
import face_recognition
image = face_recognition.load_image_file(r"C:\Users\Julio\Desktop\Face Extraction\Input\IMG_0421.JPG")
path = r"C:\Users\Julio\Desktop\Face Extraction\Output\face"
face_locations = face_recognition.face_locations(image)
print("I found {} face(s) in this photograph.".format(len(face_locations)))
face_counter = 0
for face_location in face_locations:
top, right, bottom, left = face_location
print("I found a face in image location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.save(str(path) + str(face_counter) + ".jpg")
face_counter += 1
I did something similar recently which I hope will be of help to you. I basically set my path to a variable, then the listdir of that path to a variable called img_list, then cycled through that:
# media thumbnails
path = "/media/default_thumbnails/"
img_list = listdir(path)
for image in img_list:
"""
Need to open an image file from the directory as a PIL Image.
Then create a BytesIO and save that as the thumbnail.
"""
try:
img = PILImage.open(path + image)
rawfile = BytesIO()
img.save(rawfile, format='jpeg')
mt = MediaThumbnail.objects.create()
mt.thumbnail.save(image, rawfile)
rawfile.close()
except:
print(f" skipping {image}")
print(" thumbnails created")
To integrate that into your code you'd do something like:
from PIL import Image
import face_recognition
path = "C:\Users\Julio\Desktop\Face Extraction\Input"
img_list = listdir(path)
for input_img in img_list:
image = face_recognition.load_image_file(input_img)
[.... the rest of your code ]

how to loop over all images and add border to all images in python?

I am working on image editing in python 3.7. I have a function which add border to all the images. But It returns only first image in folder and exists. This is my function:
from PIL import Image
import cv2
import numpy as np
import datetime
time = datetime.datetime.now()
def img_filter(img_in,border):
img = Image.open(border)
background = Image.open(img_in)
size = background.size
img = img.resize(size,Image.ANTIALIAS)
background = background.resize(size,Image.ANTIALIAS)
background.paste(img,(0,0),img)
saved = background.save(f"./img/1{time}.jpg")
print(saved)
img.close()
AND thats my code:
path = glob.glob("./img/*.jpg")
for img in path:
with open(img, 'rb') as file :
img = Image.open(file)
img_filter(img,'v.png')
please help me.
The time variable is global. So the value remains same for all the images. Either you can create time variable inside the img_filter method or you can create that variable inside the for loop and pass it as param to the method.
I personally would have preferred to create a curr_time variable inside the for loop.

Resources