I keep getting an attribute error from the following code.
import csv
myFile = open('countries.csv', 'w')
with myFile:
myFields = ['country', 'capital']
writer = csv.DictWriter(myFile, fieldnames=myFields)
writer.writeheader()
writer.writerow({'country' : 'France', 'capital': 'Paris'})
writer.writerow({'country' : 'Italy', 'capital': 'Rome'})
writer.writerow({'country' : 'Spain', 'capital': 'Madrid'})
writer.writerow({'country' : 'Russia', 'capital': 'Moscow'})
any ideas
Related
I am building a telegram-bot and using Dialogflow in it, I am getting the following error :
2021-11-19 23:26:46,936 - __main__ - ERROR - Update '{'message': {'entities': [],
'delete_chat_photo': False, 'text': 'hi', 'date': 1637344606, 'new_chat_members': [],
'channel_chat_created': False, 'message_id': 93, 'photo': [], 'group_chat_created':
False, 'supergroup_chat_created': False, 'new_chat_photo': [], 'caption_entities': [],
'chat': {'id': 902424541, 'type': 'private', 'first_name': 'Akriti'},
'from': {'first_name': 'Akriti', 'id': 902424541, 'is_bot': False, 'language_code': 'en'}
}, 'update_id': 624230278}' caused error 'module 'google.cloud.dialogflow' has no
attribute 'types''
It appears there is some issue with the Dialogflow attribute "types", but I don't know what I am doing wrong.
Here is the code where I am using it:
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "client.json"
from google.cloud import dialogflow
dialogflow_session_client = dialogflow.SessionsClient()
PROJECT_ID = "get-informed-ufnl"
from gnewsclient import gnewsclient
client = gnewsclient.NewsClient()
def detect_intent_from_text(text, session_id, language_code='en'):
session = dialogflow_session_client.session_path(PROJECT_ID, session_id)
text_input = dialogflow.types.TextInput(text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = dialogflow_session_client.detect_intent(session=session, query_input=query_input)
return response.query_result
def get_reply(query, chat_id):
response = detect_intent_from_text(query, chat_id)
if response.intent.display_name == 'get_news':
return "get_news", dict(response.parameters)
else:
return "small_talk", response.fulfillment_text
def fetch_news(parameters):
client.language = parameters.get('language')
client.location = parameters.get('geo-country')
client.topic = parameters.get('topic')
return client.get_news()[:5]
topics_keyboard = [
['Top Stories', 'World', 'Nation'],
['Business', 'Technology', 'Entertainment'],
['Sports', 'Science', 'Health']
]
I figured it out, the problem lies in the import statement. The correct module name should be:
import google.cloud.dialogflow_v2 as dialogflow
I recommend to deactivate your current error handler or use one similar to this example such that you can see the full traceback of the exception :)
Disclaimer: I'm currently the maintainer of python-telegram-bot
This is a Python Program to get all the captions from youtube link:
from pytube import YouTube
yt = YouTube('https://youtu.be/5MgBikgcWnY')
captions = yt.captions.all()
for caption in captions:
print(caption)
and the output of the above program is:
<Caption lang="Arabic" code="ar">
<Caption lang="Chinese (China)" code="zh-CN">
<Caption lang="English" code="en">
<Caption lang="English (auto-generated)" code="a.en">
<Caption lang="French" code="fr">
<Caption lang="German" code="de">
<Caption lang="Hungarian" code="hu">
<Caption lang="Italian" code="it">
But I want to get only the lang and code from the above output in a dictionary pair.
{"Arabic" : "ar", "Chinese" : "zh-CN", "English" : "en",
"French : "fr", "German" : "de", "Hungarian" : "hu", "Italian" : "it"}
Thanks in Advance.
It's pretty simple
from pytube import YouTube
yt = YouTube('https://youtu.be/5MgBikgcWnY')
captions = yt.captions.all()
captions_dict = {}
for caption in captions:
# Mapping the caption name to the caption code
captions_dict[caption.name] = caption.code
If you want a one-liner
captions_dict = {caption.name: caption.code for caption in captions}
Output
{'Arabic': 'ar', 'Bangla': 'bn', 'Burmese': 'my', 'Chinese (China)': 'zh-CN',
'Chinese (Taiwan)': 'zh-TW', 'Croatian': 'hr', 'English': 'en',
'English (auto-generated)': 'a.en', 'French': 'fr', 'German': 'de',
'Hebrew': 'iw', 'Hungarian': 'hu', 'Italian': 'it', 'Japanese': 'ja',
'Persian': 'fa', 'Polish': 'pl', 'Portuguese (Brazil)': 'pt-BR',
'Russian': 'ru', 'Serbian': 'sr', 'Slovak': 'sk', 'Spanish': 'es',
'Spanish (Spain)': 'es-ES', 'Thai': 'th', 'Turkish': 'tr',
'Ukrainian': 'uk', 'Vietnamese': 'vi'}
I use ytapi to get info from searching a YouTube video and I get a videoId in terminal. So i am trying to get that so from that i can open the YouTube video.
Here is what i've done:
from apiclient.discovery import build
youtube = build("youtube" , "v3" , developerKey = api_key)
req = youtube.search().list(q="google",part="id",type="video",fields="items/id")
res = req.execute()
print(res)
Output:
{'items': [{'id': {'kind': 'youtube#video', 'videoId': 'XKmsYB54zBk'}}, {'id': {'kind': 'youtube#video', 'videoId': 'd6nwLctqB3c'}}, {'id': {'kind': 'youtube#video', 'videoId': 'F95wcsJAz0c'}}, {'id': {'kind': 'youtube#video', 'videoId': 'p0ysH2Glw5w'}}, {'id': {'kind': 'youtube#video', 'videoId': 'GvmDVq3PtA8'}}]}
From that is there a way to get only the videoId so i can use it to open videos with
webbrowser.open("https://www.youtube.com/watch?v= " + videoId)
This is what you are looking for
from apiclient.discovery import build
youtube = build("youtube" , "v3" , developerKey = api_key)
req = youtube.search().list(q="google",part="id",type="video",fields="items/id")
res = req.execute()
urladd = res['items'][0]['id']['videoId']
webbrowser.open("https://www.youtube.com/watch?v= " + urladd)
When I import a JSON from a URL it says it has an extra character. Below is the:
Code:
import requests
import json
response = json.loads(requests.get("https://s3-us-west-2.amazonaws.com/anand-fhir-json/2e3b7aa0-0c90-4b31-8967-680f1438e03e").text)
print(res1);
JSON format:
{"id":{"s":"2"},"managingOrganization":{"s":"{'reference': 'Organization/hl7'}"},"address":{"s":"[{'use': 'home', 'line': ['2222 Home Street']}]"},"name":{"s":"[{'use': 'official', 'family': 'Everyman', 'given': ['Adam']}]"},"telecom":{"s":"[{'system': 'phone', 'value': '555-555-2004', 'use': 'work'}]"},"gender":{"s":"male"},"active":{"s":"true"},"birthDate":{"s":"null"},"meta":{"s":"{'lastUpdated': '2012-05-29T23:45:32Z'}"},"resourceType":{"s":"Patient"}}
{"id":{"s":"8"},"managingOrganization":{"s":"{'reference': 'Organization/hl7'}"},"address":{"s":"[{'use': 'home', 'line': ['4444 Home Street']}]"},"name":{"s":"[{'use': 'official', 'family': 'Mum', 'given': ['Martha']}]"},"telecom":{"s":"[{'system': 'phone', 'value': '555-555-2006', 'use': 'work'}]"},"gender":{"s":"female"},"active":{"s":"true"},"birthDate":{"s":"null"},"meta":{"s":"{'lastUpdated': '2012-05-29T23:45:32Z'}"},"resourceType":{"s":"Patient"}}
I need to export a nested dictionary to CSV. Here's what each entry looks like (that needs to be one line in the csv later):
{'createdTime': '2017-10-30T12:33:02.000Z',
'fields': {'Date': '2017-10-30T12:32:56.000Z',
'field1': 'example#gmail.com',
'field2': 1474538185964188,
'field3': 6337,
....},
'id': 'reca7LBr64XM1ClWy'}
I think I need to iterate through the dictionary and create a list of lists(?) to create the csv from using the csv module.
['Date', 'field1', 'field2', 'field3', ...],
['2017-10-30T12:32:56.000Z', 'example#gmail.com', 1474538185964188, 6337 ...]
My problem is to find a smart way to iterate through the dict to get to a list like this.
You can get the values in the below way:
def process_data():
csv_data = [{'createdTime': '2017-10-30T12:33:02.000Z',
'fields': {'Date': '2017-10-30T12:32:56.000Z',
'field1': 'example#gmail.com',
'field2': 1474538185964188,
'field3': 6337},
'id': 'reca7LBr64XM1ClWy'},
{'createdTime': '2017-10-30T12:33:02.000Z',
'fields': {'Date': '2017-10-30T12:32:56.000Z',
'field1': 'example#gmail.com',
'field2': 1474538185964188,
'field3': 6337},
'id': 'reca7LBr64XM1ClWy'}]
headers = [key for key in csv_data[0]['fields'].keys()]
body = []
for row in csv_data:
body_row = []
for colomn_header in headers:
body_row.append(row['fields'][colomn_header])
body.append(body_row)
process_data()
#header -- ['Date', 'field1', 'field2', 'field3']
#body -- [['2017-10-30T12:32:56.000Z', 'example#gmail.com',
# 1474538185964188, 6337], ['2017-10-30T12:32:56.000Z',
# 'example#gmail.com', 1474538185964188, 6337]]