Place order with IB API - python-3.x

After deciding to walk away from IbPy and learn the IB API, I am having trouble even placing an order. What am I doing wrong?
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import *

# Def Market Order
class TestApp(EClient, EWrapper):
    def __init__(self):
        EClient.__init__(self, self)

    def nextValidId(self, orderId: int):
        self.nextOrderId = orderId
        print(orderId)
        self.start()

    def start(self):
        contract = Contract()
        contract.symbol = "ES"
        contract.secType = "FUT"
        contract.exchange = "GLOBEX"
        contract.currency = "USD"
        contract.lastTradeDateOrContractMonth = "201903"
        order = Order()
        order.action = "BUY"
        order.orderType = "MKT"
        order.totalQuantity = 1
        order.outsideRth = True
        self.placeOrder(self.nextOrderId, contract, order)

    def stop(self):
        self.done = True
        self.disconnect()

def main():
    app = TestApp()
    app.connect("127.0.0.1", 7496, 420)
    app.nextOrderId = 0
    app.run()

if __name__ == "__main__":
    main()
I removed more IbPy and replaced it with ibapi; now I'm getting:
%Run basic.py
394
ERROR -1 2104 Market data farm connection is OK:usfuture
ERROR -1 2104 Market data farm connection is OK:usfarm
ERROR -1 2106 HMDS data farm connection is OK:ushmds
ERROR 394 200 No security definition has been found for the request

from ib.ext.Contract import Contract
from ib.ext.Order import Order
Those are from IbPy; use only classes that start with ibapi.
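For reference, a minimal sketch of the ibapi-only imports the updated code should rely on (these mirror the classes already used in the question's code):

from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract   # ibapi.contract, not ib.ext.Contract
from ibapi.order import Order         # ibapi.order, not ib.ext.Order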

Related

emit broadcast message in callback with flask socketIO

I have trouble using Flask socketio. Here is the code I use:
import json
import time
import threading
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from engineio.async_drivers import gevent
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins="*")

import queue
queue_notification_thread = queue.Queue()

def callback_notification(data):
    print("callback device {}".format(data))
    notification_thread = threading.Thread(target=notification_job, args=(data,))
    notification_thread.start()
    queue_notification_thread.put(notification_thread)

def notification_job(data):
    print("callback device in notification job {}".format(data))
    socketio.emit("notification", data, broadcast=True)

@socketio.on('request')
def handle_message(data):
    Logger.instance().debug('received message: {}'.format(data))
    try:
        if data.__contains__('data'):
            response_message = dict()
            response_message['Devices'] = dict()
            response_message['Devices']['event'] = 'MY_EVENT'
            socketio.emit('notification', response_message, broadcast=True)
        else:
            Logger.instance().error('Can\'t parse data {}'.format(data))
    except OSError as err:
        print('Error: when process {} \n ValueError {}'.format(data, err))

@socketio.on_error_default  # handles all namespaces without an explicit error handler
def default_error_handler(e):
    print('An error occurred:')
    print(e)

if __name__ == '__main__':
    serialReader = SerialReader()
    serialReader.start_reading(callback_notification)
    http_server = WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
    http_server.serve_forever()
And the reader, which calls the callback asynchronously:
import time
import threading
import serial

class SerialController:
    serial_port: str
    serial_device: serial.Serial
    reading_thread: threading.Thread
    device_name: str

    def __init__(self, serial_port: str = "/dev/ttyACM0", baudrate=115200, read_timeout=0.2, device_name=''):
        self.serial_port = serial_port
        self.device_name = device_name
        self.serial_device = serial.Serial(port=self.serial_port, baudrate=baudrate, timeout=0.2)

    def start_reading(self, callback_function):
        self.reading_callback = callback_function
        # run the thread to read from the serial port in the background
        self.reading_thread = threading.Thread(target=self.read_job)
        self.reading_thread.start()

    def read_job(self):
        available_data = 0
        while True:
            if self.serial_device.in_waiting > available_data:
                available_data = self.serial_device.in_waiting
                print('available_data {}'.format(available_data))
                time.sleep(0.1)
            else:
                if available_data != 0:
                    data = self.serial_device.readall()
                    available_data = 0
                    if data != b'' and data != b'\n':
                        if self.reading_callback != None:
                            message = dict()
                            message["Reader"] = dict()
                            message["Reader"]["device"] = self.device_name
                            message["Reader"]["data"] = data
                            self.reading_callback(message)
                time.sleep(1)
When I receive a message in @socketio.on('request'), the broadcast emission works properly with no delay. When I use callback_notification called from my serial reader, the broadcast emission has a variable delay (from 1 second to 10 seconds).
On my server the message "callback device ..." is printed instantly, but the client receives the message only after a few seconds.
I tried making the emission call in a thread, as in the code shown, but there is no improvement.
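An untested sketch of one commonly suggested workaround, assuming the default gevent async mode: let Flask-SocketIO spawn the emitting task itself via start_background_task, so the emit runs on a gevent-friendly task instead of a plain threading.Thread (function names match the question's code):

def callback_notification(data):
    print("callback device {}".format(data))
    # start_background_task uses the async framework's own primitives,
    # so the emit is not left waiting for the gevent loop to notice it
    socketio.start_background_task(notification_job, data)

def notification_job(data):
    print("callback device in notification job {}".format(data))
    socketio.emit("notification", data)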

Python instance variable seems to be shared between two objects

I'm trying to use the shioaji API to create a Taiwan stock market trading application.
However, I found some strange behavior during development.
Here's my code:
import tkinter as tk
import os
import shioaji as sj
from shioaji import BidAskFOPv1, Exchange
from dotenv import load_dotenv

class TouchOrderBuy:
    def __init__(self, api, contract):
        print(f"{contract.symbol} TouchOrder init ...")
        self.api = api
        self.contract = contract
        self.is_triggered = False
        self.api.quote.subscribe(
            contract=self.contract,
            quote_type=sj.constant.QuoteType.BidAsk,
            version=sj.constant.QuoteVersion.v1
        )
        print(f"self.quote_bidask_callback address: {hex(id(self.quote_bidask_callback))}")
        self.api.quote.set_on_bidask_fop_v1_callback(self.quote_bidask_callback)

    def quote_bidask_callback(self, exchange: Exchange, bidask: BidAskFOPv1):
        print(f"{bidask.code} quote_bidask_callback")
        print(f"self.is_triggered: {self.is_triggered}")
        print(f"self.is_triggered address: {hex(id(self.is_triggered))}")
        if bidask.code == 'TXO17500C2':
            print(f"set self.is_triggered as True")
            self.is_triggered = True

class TradingApp:
    def __init__(self):
        self.main_window = tk.Tk()
        self.main_window.wm_title('test subscription')
        self.initialize()
        self.crate_gui()
        self.main_window.mainloop()

    def initialize(self):
        self.api = sj.Shioaji(simulation=False)
        login_info = self.api.login(
            person_id=os.getenv('SHIOAJI_USERID'),
            passwd=os.getenv("SHIOAJI_PASSWORD"),
            contracts_timeout=10000,
            contracts_cb=print,
            fetch_contract=True
        )
        print('Login Done')
        self.api.set_default_account(self.api.list_accounts()[2])
        resp = self.api.activate_ca(
            ca_path="Sinopac.pfx",
            ca_passwd=os.getenv('CA_PASSWORD'),
            person_id=os.getenv('SHIOAJI_USERID'),
        )

    def crate_gui(self):
        sub_put_button = tk.Button(self.main_window, text='subs put', command=self.subscribe_put)
        sub_put_button.pack()
        sub_call_button = tk.Button(self.main_window, text='subs call', command=self.subscribe_call)
        sub_call_button.pack()

    def subscribe_call(self):
        t_call = TouchOrderBuy(self.api, self.api.Contracts.Options.TXO.TXO202203017500C)

    def subscribe_put(self):
        t_put = TouchOrderBuy(self.api, self.api.Contracts.Options.TXO.TXO202203017500P)

if __name__ == '__main__':
    load_dotenv()
    app = TradingApp()
As you can see from the above code, I can create two TouchOrderBuy objects by first clicking the subs put button, then clicking the subs call button.
I found that when t_call's self.is_triggered becomes True, t_put's self.is_triggered also becomes True.
Why does this happen?
Here's a snippet of the output log:
According to this article, the contents of an instance variable are completely independent from one object instance to the other.
I tried to create a minimal reproducible example without using shioaji, but without success. I'm sorry about that.
Another question I have is why t_put's and t_call's is_triggered variables refer to the same memory address.
I guess it's because Python has something similar to the integer cache for boolean values.
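As a side note on the address question (my own check, unrelated to shioaji): in CPython, True and False are singletons, so id(self.is_triggered) is just the id of the shared False or True object; identical addresses therefore do not imply that the attribute itself is shared between instances. For example:

a = False
b = False
print(hex(id(a)), hex(id(b)))          # same address: both names point at the single False object
print(hex(id(True)), hex(id(False)))   # True and False are two distinct singleton objects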
I also created another version of the code to test whether self.is_triggered is shared between t_put and t_call:
import tkinter as tk
import os
import shioaji as sj
from shioaji import BidAskFOPv1, Exchange
from dotenv import load_dotenv

class TouchOrderBuy:
    def __init__(self, api, contract):
        print(f"{contract.symbol} TouchOrder init ...")
        self.api = api
        self.contract = contract
        self.is_triggered = False

    def get_is_triggered(self):
        print(f"is_triggered address: {hex(id(self.is_triggered))}")
        return self.is_triggered

    def set_is_triggered(self, value):
        self.is_triggered = value

if __name__ == '__main__':
    load_dotenv()
    api = sj.Shioaji(simulation=False)
    login_info = api.login(
        person_id=os.getenv('SHIOAJI_USERID'),
        passwd=os.getenv("SHIOAJI_PASSWORD"),
        contracts_timeout=10000,
        contracts_cb=print,
        fetch_contract=True
    )
    print('Login Done')
    api.set_default_account(api.list_accounts()[2])
    resp = api.activate_ca(
        ca_path="Sinopac.pfx",
        ca_passwd=os.getenv('CA_PASSWORD'),
        person_id=os.getenv('SHIOAJI_USERID'),
    )
    t_put = TouchOrderBuy(api, api.Contracts.Options.TXO.TXO202203017500P)
    t_call = TouchOrderBuy(api, api.Contracts.Options.TXO.TXO202203017500C)
    print(f"put is_triggered: {t_put.get_is_triggered()}")
    print(f"call is_triggered: {t_call.get_is_triggered()}")
    t_call.set_is_triggered(True)
    print(f"put is_triggered: {t_put.get_is_triggered()}")
    print(f"call is_triggered: {t_call.get_is_triggered()}")
In this version, when t_call's self.is_triggered is changed, t_put's self.is_triggered does not change.
Here's its output:
TXO202203017500P TouchOrder init ...
TXO202203017500C TouchOrder init ...
is_triggered address: 0x7ffb2a9fa970
put is_triggered: False
is_triggered address: 0x7ffb2a9fa970
call is_triggered: False
is_triggered address: 0x7ffb2a9fa970
put is_triggered: False
is_triggered address: 0x7ffb2a9fa950
call is_triggered: True
Why does this version of the code not show the weird behavior?

How to publish to Pub/Sub from Dataflow in batch (efficiently)?

I want to publish messages to a Pub/Sub topic with some attributes from a Dataflow job in batch mode.
My Dataflow pipeline is written with Python 3.8 and apache-beam 2.27.0.
It works with the @Ankur solution here: https://stackoverflow.com/a/55824287/9455637
But I think it could be more efficient with a shared Pub/Sub client: https://stackoverflow.com/a/55833997/9455637
However, an error occurred:
return StockUnpickler.find_class(self, module, name)
AttributeError: Can't get attribute 'PublishFn' on <module 'dataflow_worker.start' from '/usr/local/lib/python3.8/site-packages/dataflow_worker/start.py'>
Questions:
Would the shared publisher implementation improve beam pipeline performance?
Is there another way to avoid the pickling error on my shared publisher client?
My Dataflow pipeline:
import apache_beam as beam
from apache_beam.io.gcp import bigquery
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from google.cloud.pubsub_v1 import PublisherClient
import json
import argparse
import re
import logging

class PubsubClient(PublisherClient):
    def __reduce__(self):
        return self.__class__, (self.batch_settings,)

# The DoFn to perform on each element in the input PCollection.
class PublishFn(beam.DoFn):
    def __init__(self):
        from google.cloud import pubsub_v1

        batch_settings = pubsub_v1.types.BatchSettings(
            max_bytes=1024,  # One kilobyte
            max_latency=1,  # One second
        )
        self.publisher = PubsubClient(batch_settings)
        super().__init__()

    def process(self, element, **kwargs):
        future = self.publisher.publish(
            topic=element["topic"],
            data=json.dumps(element["data"]).encode("utf-8"),
            **element["attributes"],
        )
        return future.result()

def run(argv=None, save_main_session=True):
    """Main entry point; defines and runs the pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--source_table_id",
        dest="source_table_id",
        default="",
        help="BigQuery source table <project>.<dataset>.<table> with columns (topic, attributes, data)",
    )
    known_args, pipeline_args = parser.parse_known_args(argv)

    # We use the save_main_session option because one or more DoFn's in this
    # workflow rely on global context (e.g., a module imported at module level).
    pipeline_options = PipelineOptions(pipeline_args)
    # pipeline_options.view_as(SetupOptions).save_main_session = save_main_session

    bq_source_table = known_args.source_table_id
    bq_table_regex = r"^(?P<PROJECT_ID>[a-zA-Z0-9_-]*)[\.|\:](?P<DATASET_ID>[a-zA-Z0-9_]*)\.(?P<TABLE_ID>[a-zA-Z0-9_-]*)$"

    regex_match = re.search(bq_table_regex, bq_source_table)
    if not regex_match:
        raise ValueError(
            f"Bad BigQuery table id : `{bq_source_table}` please match {bq_table_regex}"
        )

    table_ref = bigquery.TableReference(
        projectId=regex_match.group("PROJECT_ID"),
        datasetId=regex_match.group("DATASET_ID"),
        tableId=regex_match.group("TABLE_ID"),
    )

    with beam.Pipeline(options=pipeline_options) as p:
        (
            p
            | "ReadFromBqTable"  #
            >> bigquery.ReadFromBigQuery(table=table_ref, use_json_exports=True)  # Each row contains : topic / attributes / data
            | "PublishRowsToPubSub" >> beam.ParDo(PublishFn())
        )

if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    run()
After fussing with this a bit, I think I have an answer that works consistently and is, if not world-beatingly performant, at least tolerably usable:
import logging

import apache_beam as beam
from apache_beam.io.gcp.pubsub import PubsubMessage
from google.cloud.pubsub_v1 import PublisherClient
from google.cloud.pubsub_v1.types import (
    BatchSettings,
    LimitExceededBehavior,
    PublishFlowControl,
    PublisherOptions,
)

class PublishClient(PublisherClient):
    """
    You have to override __reduce__ to make PublisherClient pickleable 😡 😤 🤬
    Props to 'Ankur' and 'Benjamin' on SO for figuring this part out; god knows
    I would not have...
    """

    def __reduce__(self):
        return self.__class__, (self.batch_settings, self.publisher_options)

class PubsubWriter(beam.DoFn):
    """
    beam.io.gcp.pubsub does not yet support batch operations, so
    we do this the hard way. it's not as performant as the native
    pubsubio but it does the job.
    """

    def __init__(self, topic: str):
        self.topic = topic
        self.window = beam.window.GlobalWindow()
        self.count = 0

    def setup(self):
        batch_settings = BatchSettings(
            max_bytes=1e6,  # 1MB
            # by default it is 10 ms, should be less than timeout used in future.result() to avoid timeout
            max_latency=1,
        )

        publisher_options = PublisherOptions(
            enable_message_ordering=False,
            # better to be slow than to drop messages during a recovery...
            flow_control=PublishFlowControl(limit_exceeded_behavior=LimitExceededBehavior.BLOCK),
        )

        self.publisher = PublishClient(batch_settings, publisher_options)

    def start_bundle(self):
        self.futures = []

    def process(self, element: PubsubMessage, window=beam.DoFn.WindowParam):
        self.window = window
        self.futures.append(
            self.publisher.publish(
                topic=self.topic,
                data=element.data,
                **element.attributes,
            )
        )

    def finish_bundle(self):
        """Iterate over the list of async publish results and block
        until all of them have either succeeded or timed out. Yield
        a WindowedValue of the success/fail counts."""
        results = []
        self.count = self.count + len(self.futures)
        for fut in self.futures:
            try:
                # future.result() blocks until success or timeout;
                # we've set a max_latency of 60s upstairs in BatchSettings,
                # so we should never spend much time waiting here.
                results.append(fut.result(timeout=60))
            except Exception as ex:
                results.append(ex)

        res_count = {"success": 0}
        for res in results:
            if isinstance(res, str):
                res_count["success"] += 1
            else:
                # if it's not a string, it's an exception
                msg = str(res)
                if msg not in res_count:
                    res_count[msg] = 1
                else:
                    res_count[msg] += 1

        logging.info(f"Pubsub publish results: {res_count}")

        yield beam.utils.windowed_value.WindowedValue(
            value=res_count,
            timestamp=0,
            windows=[self.window],
        )

    def teardown(self):
        logging.info(f"Published {self.count} messages")
The trick is that if you call future.result() inside the process() method, you will block until that single message is successfully published, so instead collect a list of futures and then at the end of the bundle make sure they're all either published or definitively timed out. Some quick testing with one of our internal pipelines suggested that this approach can publish 1.6M messages in ~200s.
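For completeness, a rough usage sketch (the topic name and the Create source are placeholders of mine, not from the answer); elements must already be PubsubMessage objects, since PubsubWriter reads element.data and element.attributes:

import apache_beam as beam
from apache_beam.io.gcp.pubsub import PubsubMessage

with beam.Pipeline() as p:
    (
        p
        | "CreateMessages" >> beam.Create(
            [PubsubMessage(data=b'{"hello": "world"}', attributes={"origin": "batch"})]
        )
        | "PublishBatch" >> beam.ParDo(
            PubsubWriter(topic="projects/my-project/topics/my-topic")
        )
    )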

Getting Interactive Brokers API into Pandas

New to Python and the IB API, and stuck on this simple thing. This application works correctly and prints the IB server reply. However, I cannot figure out how to get this data into a pandas DataFrame, or any other variable for that matter. How do you "get the data out"? Thanks!
I can't find anything on forums, documentation, or YouTube with a useful example. I think the answer must be to return accountSummary as a pd.Series, but I have no idea how.
Expected output would be a data series or variable that can be manipulated outside of the application.
from ibapi import wrapper
from ibapi.client import EClient
from ibapi.utils import iswrapper  # just for decorator
from ibapi.common import *
import pandas as pd

class TestApp(wrapper.EWrapper, EClient):
    def __init__(self):
        wrapper.EWrapper.__init__(self)
        EClient.__init__(self, wrapper=self)

    @iswrapper
    def nextValidId(self, orderId: int):
        print("setting nextValidOrderId: %d", orderId)
        self.nextValidOrderId = orderId
        # here is where you start using api
        self.reqAccountSummary(9002, "All", "$LEDGER")

    @iswrapper
    def error(self, reqId: TickerId, errorCode: int, errorString: str):
        print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)

    @iswrapper
    def accountSummary(self, reqId: int, account: str, tag: str, value: str, currency: str):
        print("Acct Summary. ReqId:", reqId, "Acct:", account,
              "Tag: ", tag, "Value:", value, "Currency:", currency)
        # IB API data returns here, how to pass it to a variable or pd.Series?

    @iswrapper
    def accountSummaryEnd(self, reqId: int):
        print("AccountSummaryEnd. Req Id: ", reqId)
        # now we can disconnect
        self.disconnect()

def main():
    app = TestApp()
    app.connect("127.0.0.1", 4001, clientId=123)
    test = app.accountSummary
    app.run()

if __name__ == "__main__":
    main()
Hi, I had the same problem and collections did it for me. Here is my code for CFD data. Maybe it will help somebody. You will have your data in app.df. Any suggestions for improvement are more than welcome.
import collections
import datetime as dt
from threading import Timer

from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
import pandas as pd

# get yesterday and put it to correct format yyyymmdd{space}{space}hh:mm:ss
yesterday = str(dt.datetime.today() - dt.timedelta(1))
yesterday = yesterday.replace('-', '')

IP = '127.0.0.1'
PORT = 7497

class App(EClient, EWrapper):
    def __init__(self):
        super().__init__(self)
        self.data = collections.defaultdict(list)

    def error(self, reqId, errorCode, errorString):
        print(f'Error {reqId}, {errorCode}, {errorString}')

    def historicalData(self, reqId, bar):
        self.data['date'].append(bar.date)
        self.data['open'].append(bar.open)
        self.data['high'].append(bar.high)
        self.data['low'].append(bar.low)
        self.data['close'].append(bar.close)
        self.data['volume'].append(bar.volume)
        self.df = pd.DataFrame.from_dict(self.data)

    def stop(self):
        self.done = True
        self.disconnect()

# create App object
app = App()
print('App created...')
app.connect(IP, PORT, 0)
print('App connected...')

# create contract
contract = Contract()
contract.symbol = 'IBDE30'
contract.secType = 'CFD'
contract.exchange = 'SMART'
contract.currency = 'EUR'
print('Contract created...')

# request historical data for contract
app.reqHistoricalData(reqId=1,
                      contract=contract,
                      endDateTime=yesterday,
                      durationStr='1 W',
                      barSizeSetting='15 mins',
                      whatToShow='ASK',
                      useRTH=0,
                      formatDate=1,
                      keepUpToDate=False,
                      chartOptions=[])

Timer(4, app.stop).start()
app.run()
I'd store the data to a dictionary, create a dataframe from the dictionary, and append the new dataframe to the main dataframe using the concat function. Here's an example:
def accountSummary(self, reqId: int, account: str, tag: str, value: str, currency: str):
    acct_dict = {"account": account, "value": value, "currency": currency}
    acct_df = pd.DataFrame([acct_dict], columns=acct_dict.keys())
    # main_df is assumed to already exist (e.g. an empty DataFrame created
    # elsewhere) so each new row can be appended to it
    main_df = pd.concat([main_df, acct_df], axis=0).reset_index()
For more information, you might like Algorithmic Trading with Interactive Brokers
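To make the idea concrete, here is a minimal, untested sketch that combines both suggestions above (class and variable names are mine): collect each accountSummary row as a dict, then build the DataFrame once accountSummaryEnd fires, so the data is available on the app object after app.run() returns.

import pandas as pd
from ibapi.client import EClient
from ibapi.wrapper import EWrapper

class AccountApp(EWrapper, EClient):
    def __init__(self):
        EClient.__init__(self, self)
        self.rows = []
        self.df = pd.DataFrame()

    def nextValidId(self, orderId):
        # connection is ready, ask for the account summary
        self.reqAccountSummary(9002, "All", "$LEDGER")

    def accountSummary(self, reqId, account, tag, value, currency):
        # store each callback row as a plain dict
        self.rows.append({"account": account, "tag": tag, "value": value, "currency": currency})

    def accountSummaryEnd(self, reqId):
        # build the DataFrame once all rows have arrived, then shut down
        self.df = pd.DataFrame(self.rows)
        self.cancelAccountSummary(reqId)
        self.disconnect()

app = AccountApp()
app.connect("127.0.0.1", 4001, clientId=123)
app.run()          # blocks until disconnect() is called in accountSummaryEnd
print(app.df)      # the data is now usable outside the callbacks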

Mocking REST APIs with Flask_restful using threading

I'm looking to mock a set of REST APIs for some tests. The following main() function works fine (i.e. it returns {"some-data": 1234} as JSON to the browser when I GET localhost:8099). The issue is that it blocks the main thread:
from gevent import monkey, sleep, pywsgi
monkey.patch_all()
import flask
from flask_restful import reqparse, abort, Api, Resource
import queue
import sys
import threading

STUFFS = {"some-data": 1234}

class Stuff(Resource):
    def get(self):
        return flask.jsonify(STUFFS)

class ControlThread(threading.Thread):
    def __init__(self, http_server, stop_event):
        threading.Thread.__init__(self)
        self.stop_event = stop_event
        self.http_server = http_server
        self.running = False

    def run(self):
        try:
            while not self.stop_event.is_set():
                if not self.running:
                    self.http_server.start()
                    self.running = True
                sleep(0.001)
        except (KeyboardInterrupt, SystemExit):
            pass
        self.http_server.stop()

class StuffMock:
    def __init__(self, port, name=None):
        if name is None:
            name = __name__
        self.app = flask.Flask(name)
        self.api = Api(self.app)
        self.api.add_resource(Stuff, "/stuff/")
        self.stop_event = threading.Event()
        self.http_server = pywsgi.WSGIServer(('', port), self.app)
        self.serving_thread = ControlThread(self.http_server,
                                            self.stop_event)
        self.serving_thread.daemon = True

    def start(self):
        self.serving_thread.start()

    def stop(self):
        self.stop_event.set()
        self.serving_thread.join()

def main():
    mocker = StuffMock(8099)
    mocker.start()
    try:
        while True:
            sleep(0.01)
    except (KeyboardInterrupt, SystemExit):
        mocker.stop()
        sys.exit()

if __name__ == "__main__":
    main()
Without the sleep() call in the while loop above, nothing resolves. Here is a more succinct usage to demonstrate:
import time
from stuff_mock import StuffMock

mocker = StuffMock(8099)
mocker.start()

while True:
    user_text = input("let's do some work on the main thread: ")
    # will only resolve the GET request after user input
    # (i.e. when the main thread executes this sleep call)
    time.sleep(0.1)
    if user_text == "q":
        break

mocker.stop()
The gevent threading module seems to work differently from the core one. Does anyone have any tips or ideas about what's going on under the hood?
I found that if I switch out threading for multiprocessing (and threading.Thread for multiprocessing.Process), everything works as expected, and I can spin up arbitrary numbers of mockers without blocking.
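For anyone curious, an untested sketch of what that multiprocessing swap might look like (it reuses the Stuff resource from stuff_mock above; the rest of the names are mine):

import multiprocessing

import flask
from flask_restful import Api
from gevent import pywsgi

class StuffMockProcess:
    """Serve the mock API from a separate process instead of a thread."""

    def __init__(self, port, name=None):
        self.app = flask.Flask(name or __name__)
        self.api = Api(self.app)
        self.api.add_resource(Stuff, "/stuff/")
        self.port = port
        self.serving_process = multiprocessing.Process(target=self._serve, daemon=True)

    def _serve(self):
        # runs in the child process, so gevent's loop never competes
        # with whatever the caller's main thread is doing
        pywsgi.WSGIServer(('', self.port), self.app).serve_forever()

    def start(self):
        self.serving_process.start()

    def stop(self):
        self.serving_process.terminate()
        self.serving_process.join()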
