Python scapy Beacon Frames - python-3.x

I'm trying to build a scapy program that scans for beacon frames. Every router (AP) should broadcast beacon frames at an interval of X milliseconds so that nearby hosts know the AP is alive.
I'm getting nothing; the only kind of Dot11 frames I've been able to capture so far are Probe Requests, and very rarely some data or control frames. I set my wireless card to monitor mode before running the script, and it supports monitor mode. I don't know what I might be doing wrong... Here's the code:
from scapy.all import *

list_prob = []

def search_prob(packet1):
    # type 0 / subtype 8 == Beacon frame (subtype 4 would be a Probe Request)
    if packet1.haslayer(Dot11) and packet1[Dot11].type == 0 and \
            packet1[Dot11].subtype == 8:
        if packet1[Dot11].addr2 not in list_prob:
            if packet1[Dot11].info not in list_prob:
                print('[>]AP', packet1[Dot11].addr2, 'SSID', packet1[Dot11].info)
                list_prob.append(packet1[Dot11].addr2)
                list_prob.append(packet1[Dot11].info)

sniff(iface='wlan0mon', prn=search_prob)
I've also tried it with Dot11Beacon instead of subtype 8 and nothing changed. I'm programming with Python 3.5 on Linux.
Any ideas?

Code to constantly change the channel of the network interface using Python:
from threading import Thread
import subprocess, shlex, time
import threading

locky = threading.Lock()

def Change_Freq_channel(channel_c):
    print('Channel:', str(channel_c))
    command = 'iwconfig wlan1mon channel ' + str(channel_c)
    command = shlex.split(command)
    subprocess.Popen(command, shell=False)  # To prevent shell injection attacks!

while True:
    for channel_c in range(1, 15):
        t = Thread(target=Change_Freq_channel, args=(channel_c,))
        t.daemon = True
        locky.acquire()
        t.start()
        time.sleep(0.1)
        locky.release()
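For reference, a minimal sketch of how the two pieces could be combined: hop channels in a background thread while sniffing for beacons on the same monitor-mode interface. The interface name wlan0mon and the 0.5 s dwell time per channel are assumptions; adjust them for your setup.

from scapy.all import Dot11, Dot11Beacon, sniff
from threading import Thread
import subprocess, time

IFACE = 'wlan0mon'   # assumed monitor-mode interface name
seen = set()

def hop_channels():
    # Cycle through the 2.4 GHz channels so beacons on every channel can be heard
    while True:
        for ch in range(1, 15):
            subprocess.call(['iwconfig', IFACE, 'channel', str(ch)])
            time.sleep(0.5)

def handle(pkt):
    if pkt.haslayer(Dot11Beacon):
        bssid = pkt[Dot11].addr2
        if bssid not in seen:
            seen.add(bssid)
            print('[>]AP', bssid, 'SSID', pkt[Dot11].info)

Thread(target=hop_channels, daemon=True).start()
sniff(iface=IFACE, prn=handle)

Beacons are only received on the channel the card is currently tuned to, which is why a sniffer parked on a single channel sees so few of them.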

Related

Raspberry Pi Pico keeps crashing ever since I started using both cores

I'm new to the Pico, having only used Arduinos before. I'm trying to make a simple rotary encoder program that displays a value from 0-12 on a 0.96" OLED display and lights up that many LEDs on a strip.
I wanted to try using multiple cores, because interrupts made the LEDs not run smoothly when I had them just cycling (everything would pause while the encoder was being turned).
However, when I run this program, aside from the encoder being bouncy, the Pico crashes maybe 30 seconds into running the program, making a mess on the display and stopping the code. I feel like there's some rule about using multiple cores that I completely ignored.
Here's the code:
from machine import Pin, I2C
from ssd1306 import SSD1306_I2C
import _thread
import utime
import neopixel

# general variables section
numOn = 0

# Encoder section
sw = Pin(12, Pin.IN, Pin.PULL_UP)
dt = Pin(11, Pin.IN)
clk = Pin(10, Pin.IN)
encodeCount = 0
lastClk = clk.value()
lastButton = False

# Encoder thread
def encoder():
    while True:
        # import stuff that I shouldn't need according to tutorials,
        # but it doesn't work without it
        global encodeCount
        global lastClk
        global clk
        import utime
        if clk.value() != lastClk:
            if dt.value() != clk.value():
                encodeCount += 1
            else:
                encodeCount -= 1
            if encodeCount > 12:
                encodeCount = 0
            elif encodeCount < 0:
                encodeCount = 12
            lastClk = clk.value()
            print(encodeCount)
        utime.sleep(0.01)

_thread.start_new_thread(encoder, ())

# LED section
numLed = 12
ledPin = 26
led = neopixel.NeoPixel(Pin(ledPin), numLed)

# Screen section
WIDTH = 128
HEIGHT = 64
i2c = I2C(0, scl=Pin(17), sda=Pin(16), freq=200000)
oled = SSD1306_I2C(WIDTH, HEIGHT, i2c)

# loop
while True:
    for i in range(numLed):
        led[i] = (0, 0, 0)
    for i in range(encodeCount):
        led[i] = (100, 0, 0)
    led.write()
    # Display section
    oled.fill(0)
    oled.text(f'numLed: {numOn}', 0, 0)
    oled.text(f'counter: {encodeCount}', 0, 40)
    oled.show()
I'm probably doing something stupid here, I just don't know what.
Also, any suggestions on simply debouncing the encoder would be very helpful.
Any help would be appreciated! Thanks!
Update: The code above bricked the Pico, so clearly I'm doing something very, very wrong. Removing the _thread start line stopped it from crashing again, so the problem is there.
Same issue with very similar code on a Raspberry Pi Pico W. I specify the 'W' because my code works without crashing on an earlier Pico.
I'm wondering if the low-level networking functions might be using the second core and causing a conflict.
I'm adding thread locking to see if passing a baton helps; for example:
# create a lock
lck = _thread.allocate_lock()

# Function that will block the thread with a while loop
# which will simply display a message every second
def second_thread():
    while True:
        # We acquire the traffic light lock
        lck.acquire()
        print("Hello, I'm here in the second thread writing every second")
        utime.sleep(1)
        # We release the traffic light lock
        lck.release()
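Building on that, a minimal sketch (untested on real hardware) of how a lock could protect the shared encodeCount in the original program, with a crude time-based debounce added; the pin numbers are carried over from the question and the 5 ms debounce window is an assumption:

import _thread
import utime
from machine import Pin

clk = Pin(10, Pin.IN)
dt = Pin(11, Pin.IN)

lock = _thread.allocate_lock()
encodeCount = 0

def encoder():
    global encodeCount
    lastClk = clk.value()
    lastEdge = utime.ticks_ms()
    while True:
        now = utime.ticks_ms()
        # crude time-based debounce: ignore edges closer than 5 ms apart
        if clk.value() != lastClk and utime.ticks_diff(now, lastEdge) > 5:
            lastEdge = now
            lock.acquire()          # only touch the shared counter while holding the lock
            if dt.value() != clk.value():
                encodeCount = (encodeCount + 1) % 13
            else:
                encodeCount = (encodeCount - 1) % 13
            lock.release()
            lastClk = clk.value()
        utime.sleep_ms(1)

_thread.start_new_thread(encoder, ())

while True:
    lock.acquire()                  # take a consistent snapshot for the LED/OLED update
    count = encodeCount
    lock.release()
    # ...update the LEDs and the OLED from `count` here...
    utime.sleep_ms(20)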

Sniff RTS's and send CTS in return with Scapy

I'm able to sniff RTS packets without a problem. I'm also able to utilize 'sendp' to send CTS packets. What I'm unable to figure out is how to have Scapy sniff RTS packets and reply to those RTS's with a crafted CTS in real-time. The intent is to send a CTS for every RTS that my AWUS036ACH can hear regardless of the intended device.
import os
import time
from threading import Thread
from scapy.layers.all import Dot11, Dot11Elt, RadioTap, sniff, sendp

rts_list = []

def change_channel():
    ch = 1
    while True:
        try:
            os.system(f"iwconfig {iface} channel {ch}")
            ch = ch % 14 + 1
            time.sleep(1)
        except KeyboardInterrupt:
            break

if __name__ == "__main__":
    iface = "wlan0"
    channel_changer = Thread(target=change_channel)
    channel_changer.daemon = True
    channel_changer.start()

    def PacketHandler(packet):
        if packet.haslayer(Dot11):
            if packet.type == 1 and packet.subtype == 11:   # type 1 / subtype 11 == RTS
                bssid = packet[Dot11].addr2
                rts_list.append(bssid)
                print("MAC: %s" % (bssid))

    sniff(iface=iface, prn=PacketHandler)

    i = 1
    while 1:
        time.sleep(.100)
        i = i + 1
        dot11 = Dot11(type=1, subtype=12, addr1=bssid, ID=0x99)   # subtype 12 == CTS
        pkt = RadioTap() / dot11
        sendp(pkt, iface=iface, realtime=True)
Why don't you try to add sendp inside your PacketHandler function?
The logic goes like this:
PacketHandler is called upon every received frame
You check whether it's an RTS frame, extract all of the necessary info you need to send a CTS frame
Call sendp with received info
There are ways to write ARP-response utilities; take a look at those for ideas.
My concern is whether it's possible to send a frame while your adapter is in monitor mode. Unfortunately I can't test it right now.
Recommendation: try to use a BPF filter with sniff. It goes like this:
sniff(iface=iface, filter="type ctl subtype rts", prn=PacketHandler)
And get rid of the frame-type test inside your PacketHandler. This way you filter for RTS at the kernel level, which improves performance. Scapy itself can easily miss RTS frames in a dense wireless environment. For more BPF filters applied to 802.11, check man pcap-filter.
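Putting that advice together, a rough sketch of what the combined handler could look like (untested; the monitor-mode interface name is an assumption, and the ID=0x99 duration value is carried over from the question):

from scapy.all import Dot11, RadioTap, sniff, sendp

iface = "wlan0"   # assumed monitor-mode interface

def PacketHandler(packet):
    # The BPF filter below already limits us to RTS frames,
    # so we only build and send the matching CTS here.
    ta = packet[Dot11].addr2                  # transmitter of the RTS
    cts = RadioTap() / Dot11(type=1, subtype=12, addr1=ta, ID=0x99)
    sendp(cts, iface=iface, verbose=False)
    print("Answered RTS from", ta)

sniff(iface=iface, filter="type ctl subtype rts", prn=PacketHandler)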

Can't Send Command to BLE Device Philips Hue Bulb (Connection Drops)

I am trying to turn my Bluetooth Hue bulb on/off and change its brightness using my Raspberry Pi 4B. The bulb is on, and I have successfully connected to it using BlueZ. When I try to run 'char-write-req 0x0027 01' to turn it on, I get this message:
GLib-WARNING **: 22:53:34.807: Invalid file descriptor
I can see that the connection is successful, but whenever I try to write a value to it, I just get this message and it disconnects. I'm running bluetoothctl 5.50. I have seen the patch conversation here: https://www.spinics.net/lists/linux-bluetooth/msg67617.html, but I am not sure it applies, and I also wouldn't know how to apply it. Can someone please help me!
EDIT: I ditched gatttool and am using bluetoothctl to connect to the bulb and 'menu gatt' to send commands to it.
I figured out that the characteristic for toggling the light on and off is 932c32bd-0002-47a2-835a-a8d455b859dd (for my Philips Hue A19). After connecting to the bulb, I was able to select this attribute and use 'write 01' to turn it on and 'write 00' to turn it off.
The brightness characteristic is 932c32bd-0002-47a2-835a-a8d455b859dd. When I read it, it outputs 'fe', which is hex for 254. This is the highest brightness setting, which it was already set to. I can use 'write <value>', where the value ranges from 1-254, to change the brightness.
Using acquire-write in bluetoothctl is typically not the correct command; read and write are what you want.
After starting bluetoothctl I would expect the series of commands to be:
connect <Address of bulb>
menu gatt
select-attribute 932c32bd-0002-47a2-835a-a8d455b859dd
write 1
write 0
If you wanted to script this, then below is a Python3 script that I would expect to turn the bulb on then off.
from time import sleep
from pydbus import SystemBus

BLUEZ_SERVICE = 'org.bluez'
BLUEZ_DEV_IFACE = 'org.bluez.Device1'
BLUEZ_CHR_IFACE = 'org.bluez.GattCharacteristic1'

class Central:
    def __init__(self, address):
        self.bus = SystemBus()
        self.mngr = self.bus.get(BLUEZ_SERVICE, '/')
        self.dev_path = self._from_device_address(address)
        self.device = self.bus.get(BLUEZ_SERVICE, self.dev_path)
        self.chars = {}

    def _from_device_address(self, addr):
        """Look up D-Bus object path from device address"""
        mng_objs = self.mngr.GetManagedObjects()
        for path in mng_objs:
            dev_addr = mng_objs[path].get(BLUEZ_DEV_IFACE, {}).get('Address', '')
            if addr.casefold() == dev_addr.casefold():
                return path

    def _get_device_chars(self):
        mng_objs = self.mngr.GetManagedObjects()
        for path in mng_objs:
            chr_uuid = mng_objs[path].get(BLUEZ_CHR_IFACE, {}).get('UUID')
            if path.startswith(self.dev_path) and chr_uuid:
                self.chars[chr_uuid] = self.bus.get(BLUEZ_SERVICE, path)

    def connect(self):
        """
        Connect to device.
        Wait for GATT services to be resolved before returning
        """
        self.device.Connect()
        while not self.device.ServicesResolved:
            sleep(0.5)
        self._get_device_chars()

    def disconnect(self):
        """Disconnect from device"""
        self.device.Disconnect()

    def char_write(self, uuid, value):
        """Write value to given GATT characteristic UUID"""
        if uuid.casefold() in self.chars:
            self.chars[uuid.casefold()].WriteValue(value, {})
        else:
            raise KeyError(f'UUID {uuid} not found')

    def char_read(self, uuid):
        """Read value of given GATT characteristic UUID"""
        if uuid.casefold() in self.chars:
            return self.chars[uuid.casefold()].ReadValue({})
        else:
            raise KeyError(f'UUID {uuid} not found')


device_address = '11:22:33:44:55:66'
light_state = '932c32bd-0002-47a2-835a-a8d455b859dd'

dev = Central(device_address)
dev.connect()
dev.char_write(light_state, [1])
sleep(5)
dev.char_write(light_state, [0])
print(dev.char_read(light_state))
dev.disconnect()
As I don't have a bulb, the above is untested, but it should be a good outline of what is required.
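For the brightness characteristic mentioned in the question, a small usage sketch with the same Central class might look like this (the UUID is the one reported above; whether the bulb accepts a plain one-byte write here is an assumption):

# brightness characteristic UUID as reported in the question
brightness = '932c32bd-0002-47a2-835a-a8d455b859dd'

dev = Central(device_address)
dev.connect()
print(dev.char_read(brightness))     # e.g. [0xfe] == 254, full brightness
dev.char_write(brightness, [128])    # roughly half brightness (valid range 1-254)
dev.disconnect()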
It worked for me after I reset the bulb to factory settings with the Bluetooth app.
The bulb appears to be able to be paired/bonded to a single device only. If other devices try to communicate with the bulb, the connection is lost, as #ukBaz mentioned in his comment.

How to update the value of pymodbus tcp server according to the message subscribed by zmq?

I am a newbie. In my current project, when the front end decides to start the Modbus service, I create a separate process for it. Values are obtained in the parent process and passed on via ZeroMQ PUB/SUB; I now want to update the values of the Modbus registers inside the Modbus service process.
I tried the approach from pymodbus's updating_server.py example, using twisted.internet.task.LoopingCall() to update the register values, but then my client can no longer connect to my server. I don't know why.
The log when the client connects to the server established with LoopingCall() showed the failure.
Then I tried to put both the updating and StartTcpServer in the async loop, but the update only ran the first time after startup and was never entered again.
Currently I'm using LoopingCall() to handle the updates, but I don't think this is a good way.
This is the code that initializes the PUB socket and all of the tags whose values can be read.
from loop import cycle
import asyncio
from multiprocessing import Process
from persistence import models as pmodels
from persistence import service as pservice
from persistence import basic as pbasic
import zmq
from zmq.asyncio import Context
from common import logging
from server.modbustcp import i3ot_tcp as sertcp
import common.config as cfg
import communication.admin as ca
import json
import os
import signal
from datetime import datetime
from server.opcuaserver import i3ot_opc as seropc

async def main():
    future = []
    task = []
    global readers, readers_old, task_flag
    logger.debug("connecting to database and create table.")
    pmodels.connect_create()
    logger.debug("init read all address to create loop task.")
    cycle.init_readers(readers)
    ctx = Context()
    publisher = ctx.socket(zmq.PUB)
    logger.debug("init publish [%s].", addrs)
    publisher.bind(addrs)
    readers_old = readers.copy()
    for reader in readers:
        task.append(asyncio.ensure_future(
            cycle.run_readers(readers[reader], publisher)))
    if not len(task):
        task_flag = True
    logger.debug("task length [%s - %s].", len(task), task)
    opcua_server = LocalServer(seropc.opc_server, "opcua")
    future = [
        start_get_all_address(),
        start_api(),
        create_address_loop(publisher, task),
        modbus_server(),
        opcua_server.run()
    ]
    logger.debug("run loop...")
    await asyncio.gather(*future)

asyncio.run(main(), debug=False)
This is to get the device tag value and publish it.
async def run_readers(reader, publisher):
    while True:
        await reader.run(publisher)

class DataReader:
    def __init__(self, freq, clients):
        self._addresses = []
        self._frequency = freq
        self._stop_signal = False
        self._clients = clients
        self.signature = sign_data_reader(self._addresses)

    async def run(self, publisher):
        while not self._stop_signal:
            for addr in self._addresses:
                await addr.read()
                data = {
                    "type": "value",
                    "data": addr._final_value
                }
                publisher.send_pyobj(data)
                if addr._status:
                    if addr.alarm_log:
                        return_alarm_log = pbasic.get_log_by_time(addr.alarm_log['date'])
                        if return_alarm_log:
                            data = {
                                "type": "alarm",
                                "data": return_alarm_log
                            }
                            publisher.send_pyobj(data)
                    self.data_send(addr)
                    logger.debug("run send data")
            await asyncio.sleep(int(self._frequency))

    def stop(self):
        self._stop_signal = True
Modbus server imports:
from common import logging
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from persistence import service as pservice
from persistence import basic as pbasic
import zmq
import common.config as cfg
import struct
import os
import signal
from datetime import datetime
from twisted.internet.task import LoopingCall

def updating_writer(a):
    logger.info("in updates of modbus tcp server.")
    context = a[0]
    # while True:
    if check_pid(os.getppid()) is False:
        os.kill(os.getpid(), signal.SIGKILL)
    url = ("ipc://{}".format(cfg.get('ipc', 'pubsub')))
    logger.debug("connecting to [%s].", url)
    ctx = zmq.Context()
    subscriber = ctx.socket(zmq.SUB)
    subscriber.connect(url)
    subscriber.setsockopt(zmq.SUBSCRIBE, b"")
    slave_id = 0x00
    msg = subscriber.recv_pyobj()
    logger.debug("updates.")
    if msg['data']['data_type'] in modbus_server_type and msg['type'] == 'value':
        addr = pservice.get_mbaddress_to_write_value(msg['data']['id'])
        if addr:
            logger.debug(
                "local address and length [%s - %s].",
                addr['local_address'], addr['length'])
            values = get_value_by_type(msg['data']['data_type'], msg['data']['final'])
            logger.debug("modbus server updates values [%s].", values)
            register = get_register(addr['type'])
            logger.debug(
                "register [%d] local address [%d] and value [%s].",
                register, addr['local_address'], values)
            context[slave_id].setValues(register, addr['local_address'], values)
    # time.sleep(1)

def tcp_server(pid):
    logger.info("Get server configure and device's tags.")
    st = datetime.now()
    data = get_servie_and_all_tags()
    if data:
        logger.debug("register address space.")
        register_address_space(data)
    else:
        logger.debug("no data to create address space.")
    length = register_number()
    store = ModbusSlaveContext(
        di=ModbusSequentialDataBlock(0, [0] * length),
        co=ModbusSequentialDataBlock(0, [0] * length),
        hr=ModbusSequentialDataBlock(0, [0] * length),
        ir=ModbusSequentialDataBlock(0, [0] * length)
    )
    context = ModbusServerContext(slaves=store, single=True)
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
    identity.ProductName = 'pymodbus Server'
    identity.ModelName = 'pymodbus Server'
    identity.MajorMinorRevision = '2.2.0'
    # ----------------------------------------------------------------------- #
    # set loop call and run server
    # ----------------------------------------------------------------------- #
    try:
        logger.debug("thread start.")
        loop = LoopingCall(updating_writer, (context, ))
        loop.start(1, now=False)
        # process = Process(target=updating_writer, args=(context, os.getpid(),))
        # process.start()
        address = (data['tcp_ip'], int(data['tcp_port']))
        nt = datetime.now() - st
        logger.info("modbus tcp server begin has used [%s] s.", nt.seconds)
        pservice.write_server_status_by_type('modbus', 'running')
        StartTcpServer(context, identity=identity, address=address)
    except Exception as e:
        logger.debug("modbus server start error [%s].", e)
        pservice.write_server_status_by_type('modbus', 'closed')
This is the code I created for the modbus process.
def process_stop(p_to_stop):
    global ptcp_flag
    pid = p_to_stop.pid
    os.kill(pid, signal.SIGKILL)
    logger.debug("process has closed.")
    ptcp_flag = False

def ptcp_create():
    global ptcp_flag
    pid = os.getpid()
    logger.debug("sentry pid [%s].", pid)
    ptcp = Process(target=sertcp.tcp_server, args=(pid,))
    ptcp_flag = True
    return ptcp

async def modbus_server():
    logger.debug("get modbus server's status.")
    global ptcp_flag
    name = 'modbus'
    while True:
        ser = pservice.get_server_status_by_name(name)
        if ser['enabled']:
            if ser['tcp_status'] == 'closed' or ser['tcp_status'] == 'running':
                tags = pbasic.get_tag_by_name(name)
                if len(tags):
                    if ptcp_flag is False:
                        logger.debug("[%s] status [%s].", ser['tcp_name'], ptcp_flag)
                        ptcp = ptcp_create()
                        ptcp.start()
                    else:
                        logger.debug("modbus server is running ...")
                else:
                    logger.debug("no address to create [%s] server.", ser['tcp_name'])
                    pservice.write_server_status_by_type(name, "closed")
            else:
                logger.debug("[%s] server is running ...", name)
        else:
            if ptcp_flag:
                process_stop(ptcp)
                logger.debug("[%s] has been closed.", ser['tcp_name'])
                pservice.write_server_status_by_type(name, "closed")
            logger.debug("[%s] server not allowed to run.", name)
        await asyncio.sleep(5)
This is the command that Docker runs.
/usr/bin/docker run --privileged --network host --name scout-sentry -v /etc/scout.cfg:/etc/scout.cfg -v /var/run:/var/run -v /sys:/sys -v /dev/mem:/dev/mem -v /var/lib/scout:/data --rm shulian/scout-sentry
This is the Docker configuration file /etc/scout.cfg.
[scout]
mode=product
[logging]
level=DEBUG
[db]
path=/data
[ipc]
cs=/var/run/scout-cs.sock
pubsub=/var/run/pubsub.sock
I want to be able to trigger the modbus value update function when there is a message coming from ZeroMQ, and it will be updated correctly.
Let's start from inside out.
Q : ...this will make it impossible for me to connect to my server with the client. I don't know why?
ZeroMQ is a smart, broker-less messaging / signaling middleware, or better, a platform for smart messaging. In case one does not feel so familiar with the art of the Zen-of-Zero as present in the ZeroMQ architecture, one may like to start with ZeroMQ Principles in less than Five Seconds before diving into further details.
The Basis :
The Scalable Formal Communication Archetype, borrowed from ZeroMQ PUB/SUB, does not come at zero cost.
This means that each infrastructure setup (both on the PUB-side and on the SUB-side) takes some rather remarkable time, and no one can be sure of when the AccessNode configuration results in an RTO-state. So the SUB-side (as proposed above) ought to be either a permanent entity, or the user shall not expect to make it RTO in zero time after a twisted.internet.task.LoopingCall() gets reinstated.
Preferred way: instantiate your (semi-)persistent zmq.Context(), get it configured so as to serve the <aContextInstance>.socket( zmq.PUB ) as needed, a minimum safeguarding setup being <aSocketInstance>.setsockopt( zmq.LINGER, 0 ), plus all the transport / queuing / security-handling details that the ecosystem exposes to your code (whitelisting, secure sizing and resource protection being the most probable candidates - but the details are related to your application domain and the risks that you are willing to face and are prepared to handle).
ZeroMQ strongly discourages sharing (zero-sharing) <aContextInstance>.socket()-instances, yet the zmq.Context()-instance can be shared / re-used (ref. ZeroMQ Principles...) / passed to more than one thread (if needed).
All <aSocketInstance>{.bind()|.connect()} methods are expensive, so try to set up the infrastructure AccessPoint(s) and their due error handling well before one tries to use their mediated communication services.
Each <aSocketInstance>.setsockopt( zmq.SUBSCRIBE, ... ) is expensive in that it may take (depending on the (local/remote) version) the form of a non-local, distributed behaviour - the local side "sets" the subscription, yet the remote side has to "be informed" about such a state change and "implement" the operations in line with the actual (propagated) state. While in earlier versions all messages were dispatched from the PUB-side and all the SUB-side(s) were flooded with such data and left to do the "filtering" in a local-side internal queue, the newer versions "implement" the Topic-Filter on the PUB-side, which further increases the latency of setting the new modus operandi in action.
Next comes the modus-operandi: how <aSocketInstance>.recv() gets results:
In their default API state, .recv()-methods are blocking, potentially infinitely blocking, if no messages arrive.
Solution: avoid blocking forms of calling the ZeroMQ <aSocket>.recv()-methods by always using the zmq.NOBLOCK mode thereof, or rather test the presence or absence of any expected message(s) with the <aSocket>.poll( zmq.POLLIN, <timeout> )-methods available, with zero or controlled timeouts. This makes you the master who decides about the flow of code execution. Not doing so, you knowingly let your code depend on an external sequence (or absence) of events, and your architecture is prone to awful problems with handling infinite blocking states (or potentially unsalvageable many-agents' distributed-behaviour live-locks or dead-locks).
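As an illustration only (not the author's code), a non-blocking variant of the updating_writer idea could poll the SUB socket with a zero timeout and push whatever has already arrived into the pymodbus context; the helpers pservice.get_mbaddress_to_write_value, get_value_by_type and get_register are carried over from the question:

import zmq

def make_subscriber(url):
    # one persistent Context / SUB socket, configured once and reused
    ctx = zmq.Context.instance()
    sub = ctx.socket(zmq.SUB)
    sub.setsockopt(zmq.LINGER, 0)
    sub.setsockopt(zmq.SUBSCRIBE, b"")
    sub.connect(url)
    return sub

def updating_writer(context, subscriber, slave_id=0x00):
    # drain every message that is already waiting, but never block the server loop
    while subscriber.poll(timeout=0, flags=zmq.POLLIN):
        msg = subscriber.recv_pyobj(flags=zmq.NOBLOCK)
        if msg.get('type') != 'value':
            continue
        addr = pservice.get_mbaddress_to_write_value(msg['data']['id'])
        if addr:
            values = get_value_by_type(msg['data']['data_type'], msg['data']['final'])
            register = get_register(addr['type'])
            context[slave_id].setValues(register, addr['local_address'], values)

Such a function could then be called periodically (for example from the existing LoopingCall) without ever blocking the Twisted reactor.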
Avoid uncontrolled cross-breeding of event loops - like passing ZeroMQ-driven loops into an external "callback"-like handler or async-decorated code blocks, where the stack of (non-)blocking logics may wreak havoc on the original idea just by throwing the system into an unresolvable state, where events miss the expected sequence of events and live-locks are unsalvageable, or only the first pass happens to go through.
Stacking asyncio code with twisted LoopingCall()-s and async/await-decorated code plus ZeroMQ blocking .recv()-s is either a Piece-of-Filigree-Precise-Art-of-Truly-a-Zen-Master, or a sure ticket to Hell - with all respect to the Art-of-Truly-Zen-Masters :o)
So, yes, complex thinking is needed -- welcome to the realms of distributed-computing!

Obtain bluetooth signal strength on Raspberry Pi of BT device without pairing

I'd like to create a kind of indoor tracking system for my existing home automation system. I thought of using BLE. I have already successfully set up hcitool on my Raspberry Pi and I can connect to my iPhone without any problems. But how can I obtain the signal strength between my Raspberry Pi and my iPhone without connecting them? I already tried sudo hcitool cc [BTADDRESS] to connect to my iPhone without authentication, but it looks like the iPhone doesn't allow those connections to stay open. I think there must be a way to get the signal strength without connecting both devices. I want to use it to determine the distance from my Raspberry Pi to my iPhone. Could I perhaps calculate the distance from the time it takes to discover my iPhone?
There are two ways to go, and so far I have been able to get both to work reliably only on Android devices.
1. Exploiting the Bluetooth friendly name of the smartphone and setting the discoverability to infinite. I have done this by writing a simple app. It works in the background, even after the app has been killed, since the discoverability setting is preserved. To the best of my knowledge, this is not possible in iOS.
2. Advertising a UUID in a BLE packet from the phone. This can be done by both Android and iOS devices. However, while in the background, iPhones switch the advertising to a shrunk mode that makes the packet unidentifiable. The problem of identifying an advertising iOS device in the background is still open.
On the Raspberry Pi, I used PyBluez to scan and look for the presence of smartphones running (1) or (2). I report a code example:
import bluetooth
import bluetooth._bluetooth as bluez
import struct, socket, sys, select
import threading, time, logging

discovery_logger = logging.getLogger('discovery')

# Standard HCI LE event codes (see bluez hci.h)
LE_META_EVENT = 0x3E
EVT_LE_CONN_COMPLETE = 0x01
EVT_LE_ADVERTISING_REPORT = 0x02

DISCOVERY_INTERVAL = 5  # seconds between inquiries (value assumed)

def hci_enable_le_scan(sock):
    # hci_toggle_le_scan() (not shown here) sends the HCI LE Set Scan Enable command
    hci_toggle_le_scan(sock, 0x01)

# Discover name and RSS of enabled BLE devices
class MyDiscoverer(bluetooth.DeviceDiscoverer):

    def pre_inquiry(self):
        self.done = False

    def device_discovered(self, address, device_class, rssi, name):
        discovery_logger.info("Discovered %s" % (address, ))
        if name == "YOUR-DEVICE-FRIENDLY_NAME":
            # Use the RSS for your detection / localization system
            pass

    def inquiry_complete(self):
        self.done = True

# Performs inquiry for name request
def async_inquiry():
    d = MyDiscoverer()
    while True:
        d.find_devices(lookup_names=True)
        readfiles = [d, ]
        while True:
            rfds = select.select(readfiles, [], [])[0]
            if d in rfds:
                d.process_event()
            if d.done:
                break
        time.sleep(DISCOVERY_INTERVAL)

# Parse received advertising packets
def parse_events(sock):
    # save current filter
    old_filter = sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    flt = bluez.hci_filter_new()
    bluez.hci_filter_all_events(flt)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, flt)
    while True:
        pkt = sock.recv(255)
        ptype, event, plen = struct.unpack("BBB", pkt[:3])
        if event == LE_META_EVENT:
            subevent, = struct.unpack("B", pkt[3:4])
            pkt = pkt[4:]
            if subevent == EVT_LE_CONN_COMPLETE:
                le_handle_connection_complete(pkt)  # handler not shown in this example
            elif subevent == EVT_LE_ADVERTISING_REPORT:
                # Check if the advertisement is the one we are searching for
                # (getASCII(), start and end are placeholders for extracting the UUID)
                if getASCII(pkt[start:end]) == "YOUR-UUID":
                    report_pkt_offset = 0
                    report_data_length, = struct.unpack(
                        "B", pkt[report_pkt_offset + 9:report_pkt_offset + 10])
                    # each report is 2 (event type, bdaddr type) + 6 (the address)
                    # + 1 (data length field) + report_data length + 1 (rssi)
                    report_pkt_offset = report_pkt_offset + 10 + report_data_length + 1
                    rssi, = struct.unpack(
                        "b", pkt[report_pkt_offset - 1:report_pkt_offset])
                    # Now you have the RSS indicator, use it for monitoring / localization
    sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, old_filter)

dev_id = 0
try:
    sock = bluez.hci_open_dev(dev_id)
except:
    print("error accessing bluetooth device...")
    sys.exit(1)

p = threading.Thread(group=None, target=parse_events, name='parsing', args=(sock, ))
d = threading.Thread(group=None, target=async_inquiry, name='async_inquiry', args=())
try:
    p.start()
except:
    print("Error: unable to start parsing thread")
try:
    d.start()
except:
    print("Error: unable to start asynchronous discovery thread")
