How to iterate over window objects to add them to a DataFrame? - python-3.x

I have an object — it seems to be a window object, EWM [com=9.5,min_periods=0,adjust=True,ignore_na=False,axis=0] — created from a dataframe column predictions_df_list["prices"], with dates as the index and the exponentially weighted average of prices as the values. I wanted to assign it to a dataframe column: predictions_df_list['ewma']. Yet it raised a NotImplementedError during type inference:
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-21-b1286fe39d1c> in <module>
---> 59 predictions_df_list['ewma'] = pd.DataFrame.ewm(predictions_df_list["prices"], span=20) #pd.DataFrame.ewma
60 predictions_df_list['actual_value'] = test['prices']
61 predictions_df_list['actual_value_ewma'] = pd.DataFrame.ewm(predictions_df_list["actual_value"], span=20)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py in __setitem__(self, key, value)
3117 else:
3118 # set column
-> 3119 self._set_item(key, value)
3120
3121 def _setitem_slice(self, key, value):
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py in _set_item(self, key, value)
3192
3193 self._ensure_valid_index(value)
-> 3194 value = self._sanitize_column(key, value)
3195 NDFrame._set_item(self, key, value)
3196
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py in _sanitize_column(self, key, value, broadcast)
3385 value = _sanitize_index(value, self.index, copy=False)
3386
-> 3387 elif isinstance(value, Index) or is_sequence(value):
3388 from pandas.core.series import _sanitize_index
3389
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\dtypes\inference.py in is_sequence(obj)
470
471 try:
--> 472 iter(obj) # Can iterate over it.
473 len(obj) # Has a length associated with it.
474 return not isinstance(obj, string_and_binary_types)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\window.py in __iter__(self)
184 def __iter__(self):
185 url = 'https://github.com/pandas-dev/pandas/issues/11704'
--> 186 raise NotImplementedError('See issue #11704 {url}'.format(url=url))
187
188 def _get_index(self, index=None):
NotImplementedError: See issue #11704 https://github.com/pandas-dev/pandas/issues/11704
When looking for documentation on window objects, it seems they are not directly iterable (their __iter__ deliberately raises NotImplementedError). Anyway, here is the predictions_df_list["prices"] Series I am working with, for reproducing the error:
2007-11-01 14021.1
2007-11-02 13825.1
2007-11-03 13533.1
2007-11-04 14021.1
2007-11-05 13345.1
2007-11-06 12578.1
2007-11-07 14021.1
2007-11-08 13533.1
2007-11-09 12678.1
2007-11-10 12578.1
2007-11-11 14021.1
2007-11-12 13825.1
2007-11-13 13533.1
2007-11-14 12661.1
2007-11-15 13320.1
2007-11-16 12678.1
2007-11-17 12775.1
2007-11-18 13533.1
2007-11-19 13868.1
2007-11-20 12581.1
2007-11-21 13345.1
2007-11-22 13533.1
2007-11-23 12678.1
2007-11-24 13533.1
2007-11-25 12684.1
2007-11-26 13825.1
2007-11-27 14021.1
2007-11-28 14021.1
2007-11-29 12678.1
2007-11-30 12578.1
...
2007-12-02 13320.1
2007-12-03 12661.1
2007-12-04 13533.1
2007-12-05 12578.1
2007-12-06 13533.1
2007-12-07 13533.1
2007-12-08 14021.1
2007-12-09 12639.1
2007-12-10 12661.1
2007-12-11 13345.1
2007-12-12 12578.1
2007-12-13 14021.1
2007-12-14 13345.1
2007-12-15 13533.1
2007-12-16 12895.1
2007-12-17 13686.1
2007-12-18 14052.1
2007-12-19 14021.1
2007-12-20 13686.1
2007-12-21 12730.1
2007-12-22 13686.1
2007-12-23 12586.1
2007-12-24 12741.1
2007-12-25 12678.1
2007-12-26 13533.1
2007-12-27 12775.1
2007-12-28 12578.1
2007-12-29 12661.1
2007-12-30 12895.1
2007-12-31 12639.1
Freq: D, Name: prices, Length: 61, dtype: float64

Your ewma values can be found by using the EWM object you have and calling .mean() on it.
# Call .mean() on the EWM object to materialize the smoothed values as a
# Series; assigning the raw EWM window object is what raised
# NotImplementedError in the question above.
df['ewm'] = df['values'].ewm(alpha=0.001).mean()

Related

Value Error: index must be monotonic increasing or decreasing while using resample('M') function on datetime values

I am stuck at this point in my code. I am trying to divide the startdate and enddate into multiple rows based on months, and for that I am trying to use the resample function to sample the dates on a monthly basis. The sample code looks like this:
PS- A lot of the BCA_REF, STARTDATE, ENDDATE values are repeated and are not unique owing to the usecase
# Toy frame reproducing the issue: repeated BCA_REF rows with start/end dates.
df = pd.DataFrame(
data = [['abc','2018-08-01','2025-07-31'], ['abc','2018-08-01','2025-07-31'],['xyz','2017-04-01','2017-04-01'], ['xyz','2017-04-01','2017-04-01'], ['pqr','2016-05-16','2017-10-15']],
columns = ['BCA_REF', 'STARTDATE', 'ENDDATE']
)
# Parse both date columns so they can later be used as a DatetimeIndex.
df['STARTDATE'] = pd.to_datetime(df['STARTDATE'])
df['ENDDATE'] = pd.to_datetime(df['ENDDATE'])
# Long format: one 'date' row per BCA_REF per STARTDATE/ENDDATE value.
df_start_end = df.melt(id_vars=['BCA_REF'],value_vars=['STARTDATE','ENDDATE'], value_name='date')
# Expand each group to month-end rows between its dates.
# NOTE(review): resample(...).pad() reindexes with method='pad', which
# requires a monotonic index (see the ValueError in the traceback below).
# melt() emits all STARTDATEs before all ENDDATEs, so after
# drop_duplicates('date').set_index('date') a group's index is not
# guaranteed sorted — presumably the fix is a .sort_index() *inside* the
# lambda, after set_index('date'); sorting the frame beforehand would not
# survive the melt ordering. TODO: confirm.
df_new = (
df_start_end.groupby(['BCA_REF'])
.apply(lambda x: x.drop_duplicates('date').set_index('date')
.resample('M').pad())
.drop(columns=['BCA_REF','variable'])
.reset_index()
)
After I run this for 40K such rows, it gives me the following error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_15069/2048245652.py in <module>
4 merged_final_new = (
5 mf_start_end.groupby(['BCA_REF'])
----> 6 .apply(lambda x: x.drop_duplicates('date').set_index('date')
7 .resample('M').pad())
8 # .drop(columns=['BCA_REF','variable'])
~/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in apply(self, func, *args, **kwargs)
1273 with option_context("mode.chained_assignment", None):
1274 try:
-> 1275 result = self._python_apply_general(f, self._selected_obj)
1276 except TypeError:
1277 # gh-20949
~/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _python_apply_general(self, f, data)
1307 data after applying f
1308 """
-> 1309 keys, values, mutated = self.grouper.apply(f, data, self.axis)
1310
1311 return self._wrap_applied_output(
~/.local/lib/python3.7/site-packages/pandas/core/groupby/ops.py in apply(self, f, data, axis)
850 # group might be modified
851 group_axes = group.axes
--> 852 res = f(group)
853 if not _is_indexed_like(res, group_axes, axis):
854 mutated = True
/tmp/ipykernel_15069/2048245652.py in <lambda>(x)
5 mf_start_end.groupby(['BCA_REF'])
6 .apply(lambda x: x.drop_duplicates('date').set_index('date')
----> 7 .resample('M').pad())
8 # .drop(columns=['BCA_REF','variable'])
9 # .reset_index()
~/.local/lib/python3.7/site-packages/pandas/core/resample.py in pad(self, limit)
507 DataFrame.fillna: Fill NA/NaN values using the specified method.
508 """
--> 509 return self._upsample("pad", limit=limit)
510
511 ffill = pad
~/.local/lib/python3.7/site-packages/pandas/core/resample.py in _upsample(self, method, limit, fill_value)
1204 else:
1205 result = obj.reindex(
-> 1206 res_index, method=method, limit=limit, fill_value=fill_value
1207 )
1208
~/.local/lib/python3.7/site-packages/pandas/util/_decorators.py in wrapper(*args, **kwargs)
322 #wraps(func)
323 def wrapper(*args, **kwargs) -> Callable[..., Any]:
--> 324 return func(*args, **kwargs)
325
326 kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
~/.local/lib/python3.7/site-packages/pandas/core/frame.py in reindex(self, *args, **kwargs)
4770 kwargs.pop("axis", None)
4771 kwargs.pop("labels", None)
-> 4772 return super().reindex(**kwargs)
4773
4774 #deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
~/.local/lib/python3.7/site-packages/pandas/core/generic.py in reindex(self, *args, **kwargs)
4817 # perform the reindex on the axes
4818 return self._reindex_axes(
-> 4819 axes, level, limit, tolerance, method, fill_value, copy
4820 ).__finalize__(self, method="reindex")
4821
~/.local/lib/python3.7/site-packages/pandas/core/frame.py in _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy)
4596 if index is not None:
4597 frame = frame._reindex_index(
-> 4598 index, method, copy, level, fill_value, limit, tolerance
4599 )
4600
~/.local/lib/python3.7/site-packages/pandas/core/frame.py in _reindex_index(self, new_index, method, copy, level, fill_value, limit, tolerance)
4612 ):
4613 new_index, indexer = self.index.reindex(
-> 4614 new_index, method=method, level=level, limit=limit, tolerance=tolerance
4615 )
4616 return self._reindex_with_indexers(
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in reindex(self, target, method, level, limit, tolerance)
3824 if self._index_as_unique:
3825 indexer = self.get_indexer(
-> 3826 target, method=method, limit=limit, tolerance=tolerance
3827 )
3828 else:
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in get_indexer(self, target, method, limit, tolerance)
3484 )
3485
-> 3486 return self._get_indexer(target, method, limit, tolerance)
3487
3488 def _get_indexer(
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _get_indexer(self, target, method, limit, tolerance)
3506
3507 if method in ["pad", "backfill"]:
-> 3508 indexer = self._get_fill_indexer(target, method, limit, tolerance)
3509 elif method == "nearest":
3510 indexer = self._get_nearest_indexer(target, limit, tolerance)
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _get_fill_indexer(self, target, method, limit, tolerance)
3582 indexer = engine_method(target_values, limit)
3583 else:
-> 3584 indexer = self._get_fill_indexer_searchsorted(target, method, limit)
3585 if tolerance is not None and len(self):
3586 indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _get_fill_indexer_searchsorted(self, target, method, limit)
3606 indexer = self.get_indexer(target)
3607 nonexact = indexer == -1
-> 3608 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
3609 if side == "left":
3610 # searchsorted returns "indices into a sorted array such that,
~/.local/lib/python3.7/site-packages/pandas/core/indexes/base.py in _searchsorted_monotonic(self, label, side)
5763 return len(self) - pos
5764
-> 5765 raise ValueError("index must be monotonic increasing or decreasing")
5766
5767 def get_slice_bound(self, label, side: str_t, kind=None) -> int:
ValueError: index must be monotonic increasing or decreasing
I tried to look for solutions for this error wherein people suggested using sort_index()/sort_values() for the 'date' column but it still does not work. I believe the issue is with the resample function.
Any help would be appreciated. Thank you

How to handle the sntwitter KeyError "player_stream_content_type" in python?

I am collecting historic tweets using snscrape's sntwitter module (ref: https://betterprogramming.pub/how-to-scrape-tweets-with-snscrape-90124ed006af). For some of the keyword searches, I am getting a KeyError for "player_stream_content_type". I found the source code for the module on GitHub (https://github.com/JustAnotherArchivist/snscrape/blob/master/snscrape/modules/twitter.py) but I am unable to figure out how to handle the error. Any suggestions on how to handle this are highly appreciated.
# Third-party dependencies: tweepy (imported but unused here), pandas,
# snscrape, langdetect.
import tweepy
import pandas as pd
import os
import snscrape.modules.twitter as sntwitter
from langdetect import detect, DetectorFactory
# Fix langdetect's RNG seed so language detection is deterministic.
DetectorFactory.seed = 0
# Creating list to append tweet data to
tweets_list1 = []
# Scrape tweets matching the query and keep English ones only.
# NOTE(review): indentation was lost in this paste — the `if` belongs inside
# the `for` body and the append inside the `if`. The KeyError
# ('player_stream_content_type') is raised inside snscrape's _make_card
# while this loop iterates get_items(), so handling it means wrapping this
# loop in try/except (or upgrading snscrape) — it cannot be caught per-tweet
# from user code once the generator raises. TODO: confirm against the
# snscrape version in use.
for i,tweet in enumerate(sntwitter.TwitterSearchScraper('itv since:2017-03-06 until:2017-04-03').get_items()):
if tweet.lang=="en":
tweets_list1.append([tweet.date, tweet.id, tweet.rawContent, tweet.user.username,tweet.user.id,
tweet.user.followersCount, tweet.user.friendsCount,tweet.user.location,
tweet.replyCount, tweet.retweetCount,tweet.likeCount,tweet.quoteCount,
tweet.hashtags,tweet.inReplyToUser,tweet.mentionedUsers
])
print(len(tweets_list1))
# Build the result frame; column order matches the appended fields above.
tweets_df2 = pd.DataFrame(tweets_list1, columns=['Datetime', 'Tweet Id', 'Text', 'Username',
'user_id','user_followers_count','user_friends_count',
'user_location','reply_count','retweet_count','like_count',
'quote_count','hashtags',
'is_reply_to','mentioned_users'])
Error message:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-14-716543e9be2b> in <module>
2 tweets_list1 = []
3
----> 4 for i,tweet in enumerate(sntwitter.TwitterSearchScraper('itv since:2017-03-06 until:2017-04-03').get_items()):
5 if tweet.lang=="en":
6
/opt/anaconda3/envs/p38/lib/python3.8/site-packages/snscrape/modules/twitter.py in get_items(self)
1448
1449 for obj in self._iter_api_data('https://api.twitter.com/2/search/adaptive.json', _TwitterAPIType.V2, params, paginationParams, cursor = self._cursor):
-> 1450 yield from self._v2_timeline_instructions_to_tweets(obj)
1451
1452 #classmethod
/opt/anaconda3/envs/p38/lib/python3.8/site-packages/snscrape/modules/twitter.py in _v2_timeline_instructions_to_tweets(self, obj, includeConversationThreads)
802 for entry in entries:
803 if entry['entryId'].startswith('sq-I-t-') or entry['entryId'].startswith('tweet-'):
--> 804 yield from self._v2_instruction_tweet_entry_to_tweet(entry['entryId'], entry['content'], obj)
805 elif includeConversationThreads and entry['entryId'].startswith('conversationThread-') and not entry['entryId'].endswith('-show_more_cursor'):
806 for item in entry['content']['timelineModule']['items']:
/opt/anaconda3/envs/p38/lib/python3.8/site-packages/snscrape/modules/twitter.py in _v2_instruction_tweet_entry_to_tweet(self, entryId, entry, obj)
825 else:
826 raise snscrape.base.ScraperException(f'Unable to handle entry {entryId!r}')
--> 827 yield self._tweet_to_tweet(tweet, obj)
828
829 def _get_tweet_id(self, tweet):
/opt/anaconda3/envs/p38/lib/python3.8/site-packages/snscrape/modules/twitter.py in _tweet_to_tweet(self, tweet, obj)
1267 kwargs['quotedTweet'] = self._tweet_to_tweet(obj['globalObjects']['tweets'][tweet['quoted_status_id_str']], obj)
1268 if 'card' in tweet:
-> 1269 kwargs['card'] = self._make_card(tweet['card'], _TwitterAPIType.V2, self._get_tweet_id(tweet))
1270 return self._make_tweet(tweet, user, **kwargs)
1271
/opt/anaconda3/envs/p38/lib/python3.8/site-packages/snscrape/modules/twitter.py in _make_card(self, card, apiType, tweetId)
1113 video = Video(
1114 thumbnailUrl = bindingValues['player_image'],
-> 1115 variants = [VideoVariant(contentType = bindingValues['player_stream_content_type'], url = bindingValues['amplify_url_vmap'], bitrate = None)],
1116 ),
1117 )
KeyError: 'player_stream_content_type'

Featuretools TypeError: unhashable type: 'Int64Index'

I am trying to create an entity set from 3 dataframes and while doing so I am getting the error: TypeError: unhashable type: 'Int64Index'
I have searched the Internet for similar issues but could not find any issues related to datetime types. Please note that none of the columns of df_raw_view_logs are unique and hence none of the columns can be used as index value and hence the dataframe.index is being used.
I am sharing the dtypes for the dataframe for which it is throwing error when I make a column from it as a time index.
df_raw_view_logs.dtypes
server_time datetime64[ns]
device_type int8
session_id int64
user_id int64
item_id int64
dtype: object
es = ft.EntitySet()
# NOTE(review): `index` must be the *name* (a string) of the index column,
# not the Index object itself. Passing df.index makes featuretools evaluate
# `index not in df.columns`, which calls hash() on an Int64Index and raises
# the TypeError shown below. Since no column is unique here, presumably
# index="<new col name>" together with make_index=True is the intended
# usage — confirm against the featuretools entity_from_dataframe docs.
es = es.entity_from_dataframe(entity_id="train",
dataframe=df_es_train,
index=df_es_train.index,
time_index="impression_time",
)
es = es.entity_from_dataframe(entity_id="viewlogs",
dataframe=df_es_view_logs,
index=df_es_view_logs.index,
time_index="server_time",
)
es = es.entity_from_dataframe(entity_id="itemdata",
dataframe=df_es_item_data,
index=df_es_item_data.index,
)
# Link entities: train.user_id -> viewlogs.user_id,
# viewlogs.item_id -> itemdata.item_id.
new_relationship = ft.Relationship(es["train"]["user_id"],
es["viewlogs"]["user_id"])
es = es.add_relationship(new_relationship)
new_relationship_1 = ft.Relationship(es["viewlogs"]["item_id"],
es["itemdata"]["item_id"])
es = es.add_relationship(new_relationship_1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-32-81425e9b87c5> in <module>
9 dataframe=df_es_view_logs,
10 index=df_es_view_logs.index,
---> 11 time_index="server_time",
12 )
13
D:\Anaconda3\envs\fastai\lib\site-packages\featuretools\entityset\entityset.py in entity_from_dataframe(self, entity_id, dataframe, index, variable_types, make_index, time_index, secondary_time_index, already_sorted)
495 secondary_time_index=secondary_time_index,
496 already_sorted=already_sorted,
--> 497 make_index=make_index)
498 self.entity_dict[entity.id] = entity
499 self.reset_data_description()
D:\Anaconda3\envs\fastai\lib\site-packages\featuretools\entityset\entity.py in __init__(self, id, df, entityset, variable_types, index, time_index, secondary_time_index, last_time_index, already_sorted, make_index, verbose)
67 """
68 _validate_entity_params(id, df, time_index)
---> 69 created_index, index, df = _create_index(index, make_index, df)
70
71 self.id = id
D:\Anaconda3\envs\fastai\lib\site-packages\featuretools\entityset\entity.py in _create_index(index, make_index, df)
547 # Case 3: user wanted to make index but column already exists
548 raise RuntimeError("Cannot make index: index variable already present")
--> 549 elif index not in df.columns:
550 if not make_index:
551 # Case 4: user names index, it is not in df. does not specify
D:\Anaconda3\envs\fastai\lib\site-packages\pandas\core\indexes\base.py in __contains__(self, key)
3917 #Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
3918 def __contains__(self, key):
-> 3919 hash(key)
3920 try:
3921 return key in self._engine
D:\Anaconda3\envs\fastai\lib\site-packages\pandas\core\indexes\base.py in __hash__(self)
3932
3933 def __hash__(self):
-> 3934 raise TypeError("unhashable type: %r" % type(self).__name__)
3935
3936 def __setitem__(self, key, value):
TypeError: unhashable type: 'Int64Index'
This errors because the index argument is supposed to be a string naming the column of your DataFrame that is the index — not the index values themselves.

How to impute values in a column and overwrite existing values

I'm trying to learn machine learning and I need to fill in the missing values for the cleaning stage of the workflow. I have 13 columns and need to impute the values for 8 of them. One column is called Dependents and I want to fill in the blanks with the word missing and change the cells that do contain data as follows: 1 to one, 2 to two, 3 to three and 3+ to threePlus.
I'm running the program in Anaconda, and the name of the dataframe is train.
train.columns
this gives me
Index(['Loan_ID', 'Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
'Loan_Amount_Term', 'Credit_History', 'Property_Area', 'Loan_Status'],
dtype='object')
next
print("Dependents")
print(train['Dependents'].unique())
this gives me
Dependents
['0' '1' '2' '3+' nan]
now i try imputing values as stated
# Question's code, kept exactly as posted (indentation was lost in the paste).
def impute_dependent():
# Map known codes to words; anything unmapped (including NaN) -> 'missing'.
my_dict={'1':'one','2':'two','3':'three','3+':'threePlus'};
return train.Dependents.map(my_dict).fillna('missing')
def convert_data(dataset):
temp_data = dataset.copy()
# NOTE(review): DataFrame.apply invokes its function with each row (axis=1)
# as an argument, but impute_dependent() accepts no arguments — that is
# exactly the TypeError "impute_dependent() takes 0 positional arguments
# but 1 was given" reported below.
temp_data['Dependents'] = temp_data[['Dependents']].apply(impute_dependent,axis=1)
return temp_data
this gives the error
TypeError Traceback (most recent call last)
<ipython-input-46-ccb1a5ea7edd> in <module>()
4 return temp_data
5
----> 6 train_dataset = convert_data(train)
7 #test_dataset = convert_data(test)
<ipython-input-46-ccb1a5ea7edd> in convert_data(dataset)
1 def convert_data(dataset):
2 temp_data = dataset.copy()
----> 3 temp_data['Dependents'] =
temp_data[['Dependents']].apply(impute_dependent,axis=1)
4 return temp_data
5
D:\Anaconda2\lib\site-packages\pandas\core\frame.py in apply(self, func,
axis, broadcast, raw, reduce, result_type, args, **kwds)
6002 args=args,
6003 kwds=kwds)
-> 6004 return op.get_result()
6005
6006 def applymap(self, func):
D:\Anaconda2\lib\site-packages\pandas\core\apply.py in get_result(self)
140 return self.apply_raw()
141
--> 142 return self.apply_standard()
143
144 def apply_empty_result(self):
D:\Anaconda2\lib\site-packages\pandas\core\apply.py in apply_standard(self)
246
247 # compute the result using the series generator
--> 248 self.apply_series_generator()
249
250 # wrap results
D:\Anaconda2\lib\site-packages\pandas\core\apply.py in
apply_series_generator(self)
275 try:
276 for i, v in enumerate(series_gen):
--> 277 results[i] = self.f(v)
278 keys.append(v.name)
279 except Exception as e:
TypeError: ('impute_dependent() takes 0 positional arguments but 1 was
given', 'occurred at index 0')
i expected one, two , three and threePlus to replace the existing values and missing to fill in the blanks
Would this do?
# Mapping of the known Dependents codes to their word labels.
my_dict = {'1': 'one', '2': 'two', '3': 'three', '3+': 'threePlus'}

def convert_data(dataset):
    """Return a copy of *dataset* with the 'Dependents' column recoded.

    Known codes ('1', '2', '3', '3+') become words; anything else —
    including NaN/blank cells and unexpected codes — becomes 'missing'.
    The input frame is not modified.
    """
    temp_data = dataset.copy()
    # map() leaves values without a dict entry as NaN; fillna then turns
    # those (and the original NaNs) into 'missing'. This replaces the
    # fragile trick of using np.nan as a dict key, which only matches a
    # NaN that hashes/compares like that exact object — NaNs parsed from
    # data frequently do not — and also removes the undeclared `np`
    # dependency the original snippet had.
    temp_data['Dependents'] = temp_data['Dependents'].map(my_dict).fillna('missing')
    return temp_data
As a side note, part of your problem might be the use of apply: essentially apply passes data through a function and puts in what comes out. I might be wrong but I think your function needs to take the input given by apply, eg:
def impute_dependent(dep):
    """Return the word label for a single Dependents cell value.

    apply() passes each cell in as the argument, so this takes exactly one
    parameter. Codes outside the mapping (the data contains '0') and NaN
    return 'missing' — the original `my_dict[dep]` raised KeyError for any
    unknown code, and its np.nan dict key does not reliably match NaNs
    coming out of a Series.
    """
    my_dict = {'1': 'one', '2': 'two', '3': 'three', '3+': 'threePlus'}
    return my_dict.get(dep, 'missing')
# Recode every value in the column. Note the function defined above is
# named impute_dependent (singular) — the original line called a
# nonexistent plural `impute_dependents`, which would raise NameError.
df.dependents = df.dependents.apply(impute_dependent)
This way, for every value in df.dependents, apply will take that value and give it to impute_dependents as an argument, then take the returned value as output. As is, when I trial your code I get an error because impute_dependent takes no arguments.

ServerSelectionTimeoutError: basicdatabase-w4eg3.mongodb.net:27017: [Errno 11001] getaddrinfo failed

I don't understand why this error is occurring in my code while connecting the code to the database of MongoDB. Here is my code:-
# Interactive script: insert one document into the VisitorBook database
# using the shared client from the Credentials module.
import Credentials
client = Credentials.client
db = client.VisitorBook
company = input('Enter Company Name: ')
# insert_one sends the write; this is where server selection happens, hence
# the ServerSelectionTimeoutError below surfaces on this line.
result=db.Company_name.insert_one({'Name':company})
print(result.inserted_id)
And this is my Credentials file, which I have made separately; this Credentials module is imported in the code above:
from pymongo import MongoClient
from bson.objectid import ObjectId
# NOTE(review): the URI has '#' between the password and the host where a
# mongodb+srv URI needs '@'. As written, the host part cannot be resolved,
# which matches the getaddrinfo / ServerSelectionTimeoutError reported —
# presumably a formatting artifact of the paste; confirm the real URI uses
# 'user:password@host'. Also remember to replace the <Password> placeholder
# (URL-encoding any special characters).
client = MongoClient('mongodb+srv://Username:<Password>#basicdatabase-w4eg3.mongodb.net/test?retryWrites=true');
After entering my username and password when I execute my code it shows this error:
---------------------------------------------------------------------------
ServerSelectionTimeoutError Traceback (most recent call last)
<ipython-input-10-754b7e794657> in <module>()
5
6 company = input('Enter Company Name: ')
----> 7 result=db.Company_name.insert_one({'Name':company})
8 print(result.inserted_id)
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\collection.py in insert_one(self, document, bypass_document_validation, session)
681 self._insert(document,
682 bypass_doc_val=bypass_document_validation,
--> 683 session=session),
684 self.write_concern.acknowledged)
685
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\collection.py in _insert(self, docs, ordered, check_keys, manipulate, write_concern, op_id, bypass_doc_val, session)
597 return self._insert_one(
598 docs, ordered, check_keys, manipulate, write_concern, op_id,
--> 599 bypass_doc_val, session)
600
601 ids = []
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\collection.py in _insert_one(self, doc, ordered, check_keys, manipulate, write_concern, op_id, bypass_doc_val, session)
577
578 result = self.__database.client._retryable_write(
--> 579 True, _insert_command, session)
580 _check_write_command_response(result)
581 else:
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py in _retryable_write(self, retryable, func, session)
1099 def _retryable_write(self, retryable, func, session):
1100 """Internal retryable write helper."""
-> 1101 with self._tmp_session(session) as s:
1102 return self._retry_with_session(retryable, func, s, None)
1103
C:\ProgramData\Anaconda3\lib\contextlib.py in __enter__(self)
79 def __enter__(self):
80 try:
---> 81 return next(self.gen)
82 except StopIteration:
83 raise RuntimeError("generator didn't yield") from None
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py in _tmp_session(self, session, close)
1405 return
1406
-> 1407 s = self._ensure_session(session)
1408 if s and close:
1409 with s:
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py in _ensure_session(self, session)
1392 # Don't make implied sessions causally consistent. Applications
1393 # should always opt-in.
-> 1394 return self.start_session(causal_consistency=False)
1395 except (ConfigurationError, InvalidOperation):
1396 # Sessions not supported, or multiple users authenticated.
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py in start_session(self, causal_consistency)
1370
1371 # Raises ConfigurationError if sessions are not supported.
-> 1372 server_session = self._get_server_session()
1373 opts = client_session.SessionOptions(
1374 causal_consistency=causal_consistency)
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\mongo_client.py in _get_server_session(self)
1378 def _get_server_session(self):
1379 """Internal: start or resume a _ServerSession."""
-> 1380 return self._topology.get_server_session()
1381
1382 def _return_server_session(self, server_session, lock):
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\topology.py in get_server_session(self)
425 any_server_selector,
426 self._settings.server_selection_timeout,
--> 427 None)
428 elif not self._description.readable_servers:
429 self._select_servers_loop(
C:\ProgramData\Anaconda3\lib\site-packages\pymongo\topology.py in _select_servers_loop(self, selector, timeout, address)
197 if timeout == 0 or now > end_time:
198 raise ServerSelectionTimeoutError(
--> 199 self._error_message(selector))
200
201 self._ensure_opened()
ServerSelectionTimeoutError: basicdatabase-w4eg3.mongodb.net:27017: [Errno 11001] getaddrinfo failed
Hi, change this code portion like this:
# Same insert as the question, using dictionary-style access. Bracket access
# behaves like attribute access here but also works for names that are not
# valid Python identifiers.
import Credentials
client = Credentials.client
db = client['VisitorBook']
collection = db['Company_name']
company = input('Enter Company Name: ')
result = collection.insert_one({'Name':company})
print(result.inserted_id)
Or try to run mongod.exe manually. Sometimes the mongod.exe console gets stuck; in such cases, press Enter inside the mongod console to refresh it.

Resources