fetch specific protobuf members - python-3.x

I want to get an array of all the lines which start with text: (up to the first asset_performance_label).
I saw this post, but wasn't sure how to apply it.
Should I convert the proto to a string, as I have tried?
text = extract_text_from_proto(r"(\w+)text:(\w+)asset_performance_label:", '''[pinned_field: HEADLINE_1
text: "5 Best Products"
asset_performance_label: PENDING
policy_summary_info
{
review_status: REVIEWED
approval_status: APPROVED
}
, pinned_field: HEADLINE_1
text: "10 Best Products 2021"
asset_performance_label: PENDING
policy_summary_info
{
review_status: REVIEWED
approval_status: APPROVED
}''')
def extract_text_from_proto(regex, proto_string):
    regex = re.escape(regex)
    result_array = [m.group() for m in re.finditer(regex, proto_string)]
    return result_array
    # return [extract_text(each_item, regex) for each_item in proto],

def extract_text(regex, item):
    m = re.match(regex, str(item))
    if m is None:
        # text = "MISSING TEXT"
        raise Exception("Ad is missing text")
    else:
        text = m.group(2)
        return text
Expected result: ["5 Best Products","10 Best Products 2021"]
What if I also want to match an (optional) pinned_field: (word), so the result could be: ['HEADLINE_1: 5 Best Products', 'HEADLINE_1: 10 Best Products 2021', 'some_text_without_pinned_field']?

You can use a single capture group, and match asset_performance_label on the next line. Use re.findall to return the group values.
\btext:\s*"([^"]+)"\n\s*asset_performance_label\b
The pattern matches
\btext:\s*" Match text: preceded by a word boundary \b to prevent a partial match
([^"]+) Capture group 1, match 1+ chars other than a double quote
"\n\s* Match a newline and optional whitespace chars
asset_performance_label\b Match asset_performance_label followed by a word boundary
For example
import re
def extract_text_from_proto(regex, proto_string):
    return re.findall(regex, proto_string)
text = extract_text_from_proto(r'\btext:\s*"([^"]+)"\n\s*asset_performance_label\b', '''[pinned_field: HEADLINE_1
text: "5 Best Products"
asset_performance_label: PENDING
policy_summary_info
{
review_status: REVIEWED
approval_status: APPROVED
}
, pinned_field: HEADLINE_1
text: "10 Best Products 2021"
asset_performance_label: PENDING
policy_summary_info
{
review_status: REVIEWED
approval_status: APPROVED
}''')
print(text)
Output
['5 Best Products', '10 Best Products 2021']
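For the follow-up about also capturing an optional pinned_field, one possible extension is to wrap the pinned_field part in an optional group and join the two captures afterwards. This is only a sketch, not part of the original answer:
import re

# Optional "pinned_field: WORD" line before the text line; group 1 is empty when it is absent.
pattern = r'(?:\bpinned_field:\s*(\w+)\s*\n\s*)?\btext:\s*"([^"]+)"\n\s*asset_performance_label\b'

sample = '''pinned_field: HEADLINE_1
text: "5 Best Products"
asset_performance_label: PENDING
text: "some_text_without_pinned_field"
asset_performance_label: PENDING'''

results = [f"{pinned}: {text}" if pinned else text
           for pinned, text in re.findall(pattern, sample)]
print(results)
# ['HEADLINE_1: 5 Best Products', 'some_text_without_pinned_field']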

Related

How does one extract the verb phrase in Spacy?

For example:
Ultimate Swirly Ice Cream Scoopers are usually overrated when one considers all of the scoopers one could buy.
Here I'd like to pluck:
Subject: "Ultimate Swirly Ice Cream Scoopers"
Adverbial Clause: "When one considers all of the scoopers one could buy"
Verb Phrase: "are usually overrated"
I have the following functions for subject, object, and adverbial clause:
import spacy

def get_subj(decomp):
    for token in decomp:
        if ("subj" in token.dep_):
            subtree = list(token.subtree)
            start = subtree[0].i
            end = subtree[-1].i + 1
            return str(decomp[start:end])

def get_obj(decomp):
    for token in decomp:
        if ("dobj" in token.dep_ or "pobj" in token.dep_):
            subtree = list(token.subtree)
            start = subtree[0].i
            end = subtree[-1].i + 1
            return str(decomp[start:end])

def get_advcl(decomp):
    for token in decomp:
        # print(f"pos: {token.pos_}; lemma: {token.lemma_}; dep: {token.dep_}")
        if ("advcl" in token.dep_):
            subtree = list(token.subtree)
            start = subtree[0].i
            end = subtree[-1].i + 1
            return str(decomp[start:end])

phrase = "Ultimate Swirly Ice Cream Scoopers are usually overrated when one considers all of the scoopers one could buy."
nlp = spacy.load("en_core_web_sm")
decomp = nlp(phrase)
subj = get_subj(decomp)
obj = get_obj(decomp)
advcl = get_advcl(decomp)
print("subj: ", subj)
print("obj: ", obj)
print("advcl: ", advcl)
Output:
subj: Ultimate Swirly Ice Cream Scoopers
obj: all of the scoopers
advcl: when one considers all of the scoopers one could buy
However, the actual dependency type .dep_ for the final word of the VP, "are usually overrated", is "ROOT".
So, the subtree technique fails, as the subtree of ROOT returns the entire sentence.
You want to construct something more like a "verb group", where you keep with the root verb only certain close dependents like aux, cop, and advmod, but not ones like nsubj, obj, or advcl.
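A minimal sketch of that idea (my own illustration, not a definitive implementation) collects the ROOT token together with those close dependents and sorts them back into sentence order:
import spacy

def get_verb_group(decomp):
    # Keep the root verb plus close dependents; spaCy's English models tend to label
    # the copula/auxiliary as aux/auxpass rather than cop, so include both.
    keep = {"aux", "auxpass", "cop", "advmod"}
    for token in decomp:
        if token.dep_ == "ROOT":
            group = [token] + [c for c in token.children if c.dep_ in keep]
            group.sort(key=lambda t: t.i)
            return " ".join(t.text for t in group)

nlp = spacy.load("en_core_web_sm")
decomp = nlp("Ultimate Swirly Ice Cream Scoopers are usually overrated "
             "when one considers all of the scoopers one could buy.")
print(get_verb_group(decomp))  # expected: are usually overrated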

Regex Error and Improvement Driving Licence Data Extraction

I am trying to extract the Name, Licence No., Date of Issue and Validity from an image I processed using Pytesseract. I am still quite confused by regex, but I have gone through some documentation and code examples on the web.
I got this far:
import pytesseract
import cv2
import re
from PIL import Image
import numpy as np
import datetime
from dateutil.relativedelta import relativedelta

def driver_license(filename):
    """
    This function will handle the core OCR processing of images.
    """
    i = cv2.imread(filename)
    newdata = pytesseract.image_to_osd(i)
    angle = re.search(r'(?<=Rotate: )\d+', newdata).group(0)
    angle = int(angle)
    i = Image.open(filename)
    if angle != 0:
        # with Image.open("ro2.jpg") as i:
        rot_angle = 360 - angle
        i = i.rotate(rot_angle, expand=True)
        i.save(filename)
    i = cv2.imread(filename)
    # Convert to gray
    i = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
    # Apply dilation and erosion to remove some noise
    kernel = np.ones((1, 1), np.uint8)
    i = cv2.dilate(i, kernel, iterations=1)
    i = cv2.erode(i, kernel, iterations=1)
    txt = pytesseract.image_to_string(i)
    print(txt)
    text = []
    data = {
        'firstName': None,
        'lastName': None,
        'age': None,
        'documentNumber': None
    }
    c = 0
    # Splitting lines
    lines = txt.split('\n')
    for lin in lines:
        c = c + 1
        s = lin.strip()
        s = s.replace('\n', '')
        if s:
            s = s.rstrip()
            s = s.lstrip()
            text.append(s)
            try:
                if re.match(r".*Name|.*name|.*NAME", s):
                    name = re.sub('[^a-zA-Z]+', ' ', s)
                    name = name.replace('Name', '')
                    name = name.replace('name', '')
                    name = name.replace('NAME', '')
                    name = name.replace(':', '')
                    name = name.rstrip()
                    name = name.lstrip()
                    nmlt = name.split(" ")
                    data['firstName'] = " ".join(nmlt[:len(nmlt) - 1])
                    data['lastName'] = nmlt[-1]
                if re.search(r"[a-zA-Z][a-zA-Z]-\d{13}", s):
                    data['documentNumber'] = re.search(r'[a-zA-Z][a-zA-Z]-\d{13}', s)
                    data['documentNumber'] = data['documentNumber'].group().replace('-', '')
                    if not data['firstName']:
                        name = lines[c]
                        name = re.sub('[^a-zA-Z]+', ' ', name)
                        name = name.rstrip()
                        name = name.lstrip()
                        nmlt = name.split(" ")
                        data['firstName'] = " ".join(nmlt[:len(nmlt) - 1])
                        data['lastName'] = nmlt[-1]
                if re.search(r"[a-zA-Z][a-zA-Z]\d{2} \d{11}", s):
                    data['documentNumber'] = re.search(r'[a-zA-Z][a-zA-Z]\d{2} \d{11}', s)
                    data['documentNumber'] = data['documentNumber'].group().replace(' ', '')
                    if not data['firstName']:
                        name = lines[c]
                        name = re.sub('[^a-zA-Z]+', ' ', name)
                        name = name.rstrip()
                        name = name.lstrip()
                        nmlt = name.split(" ")
                        data['firstName'] = " ".join(nmlt[:len(nmlt) - 1])
                        data['lastName'] = nmlt[-1]
                if re.match(r".*DOB|.*dob|.*Dob", s):
                    yob = re.sub('[^0-9]+', ' ', s)
                    yob = re.search(r'\d\d\d\d', yob)
                    data['age'] = datetime.datetime.now().year - int(yob.group())
            except:
                pass
    print(data)
I need to extract the Validity and Issue Date as well, but I am not getting anywhere near it. Also, I have seen that using regex can shorten the code a lot, so is there a better, more optimal way to do this?
My input data is a string somewhat like this:
Transport Department Government of NCT of Delhi
Licence to Drive Vehicles Throughout India
Licence No. : DL-0820100052000 (P) R
N : PARMINDER PAL SINGH GILL
: SHRI DARSHAN SINGH GILL
DOB: 10/05/1966 BG: U
Address :
104 SHARDA APPTT WEST ENCLAVE
PITAMPURA DELHI 110034
Auth to Drive Date of Issue
M.CYL. 24/02/2010
LMV-NT 24/02/2010
(Holder's Sig natu re)
Issue Date : 20/05/2016
Validity(NT) : 19/05/2021 : c
Validity(T) : NA Issuing Authority
InvCarrNo : NA NWZ-I, WAZIRPUR
Or like this:
in
Transport Department Government of NCT of Delhi
Licence to Drive Vehicles Throughout India
2
Licence No. : DL-0320170595326 () WN
Name : AZAZ AHAMADSIDDIQUIE
s/w/D : SALAHUDDIN ALI
____... DOB: 26/12/1992 BG: O+
\ \ Address:
—.~J ~—; ROO NO-25 AMK BOYS HOSTEL, J.
— NAGAR, DELHI 110025
Auth to Drive Date of Issue
M.CYL. 12/12/2017
4 wt 4
Iseue Date: 12/12/2017 a
falidity(NT) < 2037
Validity(T) +: NA /
Inv CarrNo : NA te sntian sana
Note: in the second example you wouldn't get the validity; I will optimise the OCR later. Any proper guide that can help me write a somewhat simpler regex would be good.
You can use this pattern: (?<=KEY\s*:\s*)\b[^\n]+ and replace KEY with one of your keys, e.g. Issue Date, Licence No. and the others.
Also, for this pattern you need to use the third-party regex library, because re does not support the variable-length lookbehind \s*.
Code:
import regex
text1 = """
Transport Department Government of NCT of Delhi
Licence to Drive Vehicles Throughout India
Licence No. : DL-0820100052000 (P) R
N : PARMINDER PAL SINGH GILL
: SHRI DARSHAN SINGH GILL
DOB: 10/05/1966 BG: U
Address :
104 SHARDA APPTT WEST ENCLAVE
PITAMPURA DELHI 110034
Auth to Drive Date of Issue
M.CYL. 24/02/2010
LMV-NT 24/02/2010
(Holder's Sig natu re)
Issue Date : 20/05/2016
Validity(NT) : 19/05/2021 : c
Validity(T) : NA Issuing Authority
InvCarrNo : NA NWZ-I, WAZIRPUR
"""
for key in ('Issue Date', r'Licence No\.', 'N', r'Validity\(NT\)'):
    print(regex.findall(fr"(?<={key}\s*:\s*)\b[^\n]+", text1, regex.IGNORECASE))
Output:
['20/05/2016']
['DL-0820100052000 (P) R']
['PARMINDER PAL SINGH GILL']
['19/05/2021 : c']
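If you also need the Issue Date and Validity as actual dates rather than raw strings, a small follow-up sketch (my addition, reusing text1 from above) trims the trailing OCR noise and parses the dd/mm/yyyy part:
from datetime import datetime

def extract_date(key, text):
    # Grab the value after "KEY :", keep only the dd/mm/yyyy part, and parse it.
    m = regex.search(fr"(?<={key}\s*:\s*)\b[^\n]+", text, regex.IGNORECASE)
    if not m:
        return None
    d = regex.search(r"\d{2}/\d{2}/\d{4}", m.group())
    return datetime.strptime(d.group(), "%d/%m/%Y").date() if d else None

print(extract_date("Issue Date", text1))       # 2016-05-20
print(extract_date(r"Validity\(NT\)", text1))  # 2021-05-19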
You can also use re with a single regex based on alternation that will capture your keys and values:
import re
text = "Transport Department Government of NCT of Delhi\nLicence to Drive Vehicles Throughout India\n\nLicence No. : DL-0820100052000 (P) R\nN : PARMINDER PAL SINGH GILL\n\n: SHRI DARSHAN SINGH GILL\n\nDOB: 10/05/1966 BG: U\nAddress :\n\n104 SHARDA APPTT WEST ENCLAVE\nPITAMPURA DELHI 110034\n\n\n\nAuth to Drive Date of Issue\nM.CYL. 24/02/2010\nLMV-NT 24/02/2010\n\n(Holder's Sig natu re)\n\nIssue Date : 20/05/2016\nValidity(NT) : 19/05/2021 : c\nValidity(T) : NA Issuing Authority\nInvCarrNo : NA NWZ-I, WAZIRPUR"
search_phrases = ['Issue Date', 'Licence No.', 'N', 'Validity(NT)']
reg = r"\b({})\s*:\W*(.+)".format( "|".join(sorted(map(re.escape, search_phrases), key=len, reverse=True)) )
print(re.findall(reg, text, re.IGNORECASE))
Output of this short online Python demo:
[('Licence No.', 'DL-0820100052000 (P) R'), ('N', 'PARMINDER PAL SINGH GILL'), ('Issue Date', '20/05/2016'), ('Validity(NT)', '19/05/2021 : c')]
The regex is
\b(Validity\(NT\)|Licence\ No\.|Issue\ Date|N)\s*:\W*(.+)
See its online demo.
Details:
map(re.escape, search_phrases) - escapes all special chars in your search phrases to be used as literal texts in a regex (else, . will match any chars, ? won't match a ? char, etc.)
sorted(..., key=len, reverse=True) - sorts the search phrases by length in descending order (to get longer matches first)
"|".join(...) - creates an alternation pattern, a|b|c|...
r"\b({})\s*:\W*(.+)".format( ... ) - creates the final regex.
Regex details
\b - a word boundary (NOTE: replace with (?m)^ if your matches occur at the beginning of a line)
(Validity\(NT\)|Licence\ No\.|Issue\ Date|N) - Group 1: one of the search phrases
\s* - zero or more whitespaces
: - a colon
\W* - zero or more non-word chars
(.+) - (capturing) Group 2: one or more chars other than line break chars, as many as possible.
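If you want a lookup you can query afterwards, a short follow-up (my addition) turns the (key, value) tuples from re.findall into a dict; note that if a key matched more than once, the last occurrence would win:
extracted = dict(re.findall(reg, text, re.IGNORECASE))
print(extracted.get('Issue Date'))    # 20/05/2016
print(extracted.get('Validity(NT)'))  # 19/05/2021 : c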

Extracting particular word from existing Sentence

I have the following string.
My idea is to extract particular words and store each of them in a different column.
I could not work out how to extract them. Here is what I have so far:
my_string ="TT Load:Route1 TT Load for 30 out of 80 from Route 2"
description=my_string.split(":")[0]
route_start = my_string.find('Route')
route_end= my_string.find('Route')+6
route= my_string[route_start : route_end]
TTLoad = my_string[route_end:].split('TT Load')
res = [int(i) for i in TTLoad[1].split() if i.isdigit()]
TTLoad_value= res[0]
Out_Of = my_string[route_end:].split('out of')
res = [int(i) for i in Out_Of[1].split() if i.isdigit()]
Out_Of_value= res[0]
required_dict={ "GivenDescription":my_string,
"Description":description,
"Route": route,
"TTLoad" :TTLoad_value,
"Out of" : Out_Of_value}
df=pd.DataFrame.from_dict(required_dict,orient='index').T
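A single regex with named groups could replace the manual find/split slicing. The sketch below is only a suggestion and assumes the sentence always follows the Description:RouteN TT Load for X out of Y shape shown above:
import re
import pandas as pd

my_string = "TT Load:Route1 TT Load for 30 out of 80 from Route 2"

# Hypothetical pattern; adjust it if the real sentences vary in shape.
m = re.search(r"^(?P<Description>[^:]+):(?P<Route>Route\s*\d+)\s*TT Load for\s*(?P<TTLoad>\d+)\s*out of\s*(?P<OutOf>\d+)", my_string)
if m:
    required_dict = {"GivenDescription": my_string, **m.groupdict()}
    df = pd.DataFrame.from_dict(required_dict, orient='index').T
    print(df)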

how to skip the 1st line of payload - groovy

I have a final payload in CSV format with a number of employee records. I have 2 main values to work with:
-Sequence field (added for each record)
-personal_id (unique for every employee)
Because each employee can have multiple records, I need a corresponding sequence number for each ID. In other words, if an employee with ID "123" has 5 records, the sequence values should be 1,2,3,4,5 instead of 1,1,1,1,1. All records are grouped by ID.
Also, the payload below shows only the 2 fields needed for this description; normally it has many more fields. It is a static example here, but in reality it will be generated dynamically.
Here is the input payload:
Sequence;ID
123456
232323
232323
232323
111111
111111
222222
222222
222222
222222
222222
222222
222222
and here is the expected payload:
Sequence;ID
1;123456
1;232323
2;232323
3;232323
1;111111
2;111111
1;222222
2;222222
3;222222
4;222222
5;222222
6;222222
7;222222
Here is the actual payload after using the Groovy script:
1;Sequence;ID
1;123456
1;232323
2;232323
3;232323
1;111111
2;111111
1;222222
2;222222
3;222222
4;222222
5;222222
6;222222
7;222222
I am using the following Groovy script, but the problem is that the first line of the payload is also numbered with 1. Can you show me how to skip the first line of the payload?
The script adds a number to every record so that it is counted. The point is to have all records with the same ID counted incrementally from 1. The script does that perfectly, but the issue, as I said, is that I want to skip the first line of the payload ("Sequence;...") so it is not counted.
import com.sap.gateway.ip.core.customdev.util.Message

def Message processData(Message message) {
    def payload = message.getBody(java.lang.String)
    def prevId = ''
    def sequence = 1
    def sb = new StringBuilder()
    payload.eachLine { line ->
        def values = line.split(';')
        if (values[1] != prevId) {
            // New personal ID
            sequence = 1
            prevId = values[1]
        } else {
            // Another line of the same personal ID
            sequence += 1
        }
        values[0] = sequence
        sb.append(values.join(';')).append(System.lineSeparator())
    }
    message.setBody(sb.toString())
    return message
}
Assuming message is just a String, .eachLine can be given two arguments: the line and the index. You could use that to do whatever logic you want in your closure: skip index 0 entirely, output it as-is, etc.
Basic example:
payload.eachLine { line, idx ->
    if (idx == 0) {
        // do something with the first line
    } else {
        // everything you currently have
    }
}

Using AI service to recognize a free text search field question?

Is there an API service, paid or free (IBM Watson, Google Natural Language), that can accept a free-text "ask a question" field and convert it into a set of keywords to be used for a regular keyword search?
For example if my website has a search field "Ask a question about our products", and a user types in "Do you have red dresses?", is there an API we can integrate into our code that can just convert this to "red dress" which we then simply feed into our regular keyword search for "red dress"?
Ideally it can handle variations of questions such as:
"How do you return a product?" -- return product
"Do you accept Mastercard?" -- mastercard
"Where can I find blue shoes?" -- blue shoes
You can extract noun chunks and then use those as keywords.
For example using Spacy, you can extract noun chunks as follows:
import spacy

nlp = spacy.load('en_core_web_md')

def getNounChunks(doc):
    inc = ['NN', 'NNP', 'NNPS', 'NNS', 'JJ', 'HYPH']
    incn = ['NN', 'NNP', 'NNPS', 'NNS']
    excl = ['other', 'some', 'many', 'certain', 'various']
    lspans = []
    chunk = []
    for t in doc:
        if t.text.lower() in excl:
            continue
        if chunk:
            if chunk[-1].tag_ == 'HYPH':
                chunk.append(t)
                continue
        if t.tag_ in inc:
            if t.tag_ != 'JJ':
                chunk.append(t)
            else:
                if not any([t.tag_ in incn for t in chunk]):
                    chunk.append(t)
        else:
            if chunk:
                if any([t.tag_ in incn for t in chunk]):
                    lspans.append(doc[chunk[0].i:chunk[-1].i + 1])
            chunk = list()
    return lspans

questions = [
    "How do you return a product?",
    "Do you accept Mastercard?",
    "Where can I find blue shoes?",
    "Do you have red dresses?",
]

for q in questions:
    doc = nlp(q)
    print(getNounChunks(doc))
#output:
#[product]
#[Mastercard]
#[blue shoes]
#[red dresses]
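For comparison, spaCy also ships a built-in doc.noun_chunks iterator. A shorter (if less customisable) sketch along the same lines, reusing nlp and questions from above, just filters out pronoun chunks such as "you" or "I":
for q in questions:
    doc = nlp(q)
    keywords = [chunk.text for chunk in doc.noun_chunks
                if chunk.root.pos_ != "PRON"]  # drop "you", "I", etc.
    print(keywords)
# e.g. ['a product'], ['Mastercard'], ['blue shoes'], ['red dresses']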
