My program compares images and deletes any that are duplicates, renaming the original to reflect how many were deleted. It works fine on my Linux (Ubuntu) machine, but when I run it on my Windows 8 laptop I get the error below. Any advice is greatly appreciated.
PIL.UnidentifiedImageError: cannot identify image file 'C:\\Users\\Public\\Pictures\\Sample Pictures/desktop.ini'
(There is more to the traceback if you need it. The script imports os, send2trash, PIL, shutil and tkinter.)
import os
from PIL import ImageChops, ImageDraw
import PIL.Image
import shutil
from send2trash import send2trash
from tkinter import *
root = Tk()
root.title("Cupcake 3.0")
paths = Entry(root, width = 50, borderwidth = 5)
paths.grid(row = 2, column =2, columnspan =3)
path = paths.get()
def add():
end_image = PIL.Image.new(mode = "RGB",size = (200, 70), color = "red")
end_image.save(paths.get() +"/zzz999.png")
files = os.listdir(paths.get())
files.sort()
image1 =files[0]
counter = 1
im1 = " "
for file in files:
original_file_name, file_ext = (os.path.splitext(file))
if delete == 0:
ofn, fe = (os.path.splitext(image1))
if image1 == file:
pass
elif image1 != file:
im1 = PIL.Image.open(paths.get() + "/" + image1).histogram() #the image to be compared too
im2 = PIL.Image.open(paths.get() + "/" + file).histogram()#file for comparison
if im1 == im2:
send2trash(paths.get() + "/" +file)
counter = counter +1
elif im1 != im2:
os.rename(paths.get() + "/" + image1, paths.get() + "/" + ofn + "_X" +str(counter)+ fe)
counter = 1
image1 = file
else:
print("something went wrong")
else:
addd = input("Enter in the text you wish to be added/removed (can not contain blank spaces): ").strip()
new_name = "{}{}{}".format(original_file_name, addd, file_ext).strip()
os.rename(path + "/" + file, path + "/" + new_name)
print("Job Complete")
send2trash(paths.get() + "/zzz999.png")
welcome = Label(root, text = "Welcome to Cupcake 3.0")
welcome.grid(row = 0, column =2, columnspan =3)
l1 = Label (root, text = "Please enter the path to your folder: ")
l1.grid(row = 1, column =2, columnspan =3)
paths = Entry(root, width = 50, borderwidth = 5)
paths.grid(row = 2, column =2, columnspan =3)
path = paths.get()
d = IntVar()
Radiobutton(root, text = "Add", variable = d, value = 1, anchor = W).grid(row = 3, column = 2, sticky = W)
Radiobutton(root, text = "Remove", variable = d, value = 2,anchor = W).grid(row = 4, column = 2, sticky = W)
def_question = d.get()
l2 = Label (root, text = "Enter text you want removed (leave blank if n/a): ").grid(row = 6, column = 2)
rem = Entry(root, width = 50, borderwidth = 5)
rem.grid(row = 7, column =2, columnspan =3)
remo=rem.get().strip()
de = IntVar()
c = Checkbutton(root, text = "Do you wish to delete duplicate files?", variable = de)
c.grid(row = 8, column =2, sticky = W)
delete = de.get()
def run():
#print(d.get())
#print(def_question)
if d.get() == 1:
add()
elif d.get() == 2:
remove()
else:
print("I don't understand your command")
def remove():
files = os.listdir(paths.get())
files.sort()
for file in files:
original_file_name, file_ext = (os.path.splitext(file))
#print(remo)
new_name = original_file_name.replace(rem.get(), "").strip()
os.rename(paths.get() + "/" + file, paths.get() + "/" + new_name)
print("Job Complete")
submit = Button(root, text = "Run", command =lambda: run())
submit.grid(row = 9, column =2)
root.mainloop()
"
That's my entire program.
Try to filter out the files that PIL cannot identify as images. On Windows, folders such as Sample Pictures contain a hidden desktop.ini system file, which is not an image, hence the UnidentifiedImageError. Something like this might work for you:
def image_filter(filename):
file_path = os.path.join(paths.get(), filename)
try:
im = PIL.Image.open(file_path)
im.verify()
im.close()
return True
except: # you can handle PIL.UnidentifiedImageError here
return False
def add():
...
files = os.listdir(path)
# filter out the files which are not images
files = list(filter(image_filter, files))
files.sort()
image1 = files[0]
...
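One extra guard may be worth adding: after filtering, the folder might contain no images at all, in which case files[0] raises an IndexError. A small fragment meant to sit inside add(), reusing the image_filter helper above:
files = list(filter(image_filter, os.listdir(paths.get())))
if not files:
    # nothing in the folder was recognised as an image
    print("No image files found in", paths.get())
    return
files.sort()
image1 = files[0]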
I'm trying to execute a piece of code I found online, and it is giving me the following error.
I'm new to OpenCV, so please help me.
error:
<ipython-input-1-7fe9c579ec14> in image_masking(filepath)
15 gray = cv2.imread(filepath,0)
16 edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
---> 17 edges = cv2.dilate(edges,None)
18 edges = cv2.erode(edges, None)
19
error: OpenCV(3.4.1) C:\Miniconda3\conda-bld\opencv-
suite_1533128839831\work\modules\core\src\matrix.cpp:760: error: (-215)
dims <= 2 && step[0] > 0 in function cv::Mat::locateROI
code:
import cv2
import numpy as np
def image_masking(filepath):
BLUR = 21
CANNY_THRESH_1 = 100
CANNY_THRESH_2 = 100
MASK_DILATE_ITER = 10
MASK_ERODE_ITER = 10
MASK_COLOR = (0.0,0.0,0.0) # In BGR format
gray = cv2.imread(filepath,0)
edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
edges = cv2.dilate(edges,None)
edges = cv2.erode(edges, None)
contour_info = []
_, contours, __ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for c in contours:
contour_info.append((c, cv2.isContourConvex(c), cv2.contourArea(c),))
contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
max_contour = contour_info[0]
for c in contour_info:
cv2.fillConvexPoly(mask, c[0], (255))
mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
mask_stack = np.dstack([mask]*3)
mask_stack = mask_stack.astype('float32') / 255.0
img = img.astype('float32') / 255.0
masked = (mask_stack * img) + ((1-mask_stack) * MASK_COLOR)
masked = (masked * 255).astype('uint8')
fileName, fileExtension = filepath.split('.')
fileName += '-masked.'
filepath = fileName + fileExtension
print(filepath)
cv2.imwrite(filepath, masked)
if __name__ == '__main__':
filepath = 'C:\\Users\HP\Downloads\test3.jpg'
image_masking(filepath)
I tried replacing None in the dilate function with a kernel, but it gives me the same error.
The second argument to cv2.dilate and cv2.erode should be the kernel with which you want to perform the dilation/erosion, as shown in the docs: opencv documentation
For example, you can try to do it like that:
kernel = np.ones((3, 3), np.uint8)
edges = cv2.dilate(edges, kernel)
edges = cv2.erode(edges, kernel)
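If you want more control over the neighbourhood shape, the kernel can also be built with cv2.getStructuringElement, and both calls accept an optional iterations argument. A small additional sketch (not part of the answer above):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
edges = cv2.dilate(edges, kernel, iterations=1)
edges = cv2.erode(edges, kernel, iterations=1)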
Good luck with further opencv exploration!
I'm creating an object classifier in OpenCV Python using an SVM. The training dataset consists of 200 positive and 200 negative images. For the positive images, I cropped the target object out of each of the 200 images and resized the crops to (64, 128) for HOG calculation. For the negative images, I first created an image pyramid, then applied a 64x128 sliding window, and then computed HOG features for the positive images as well as for all windows of the negative images, with labels 1 and 0. I trained the SVM model on the HOG features.
I am getting the error "cv2.error: OpenCV(3.4.2) C:\projects\opencv-python\opencv\modules\ml\src\svm.cpp:2010: error: (-215:Assertion failed) samples.cols == var_count && samples.type() == 5 in function 'cv::ml::SVMImpl::predict'" when I call the predict function using res = svm.predict(samples[0]).ravel().
import cv2
import os
import time
import numpy as np
import imutils
positive_path='C:\\Users\\Admin\\3D Objects\\datqaet with hog and svm\\ROI images'
negative_path='C:\\Users\\Admin\\3D Objects\\datqaet with hog and svm\\Negative images'
def pyramid(img): #Create image Pyramid
minSize=(30, 30)
imgarr = []
while True:
scale = 2
imgarr.append(img)
w = int(img.shape[1] / scale)
img = imutils.resize(img, width=w)
if img.shape[0] < minSize[1] or img.shape[1] < minSize[0]:
break
return imgarr
def sliding_window(image, stepSize, windowSize): #Sliding window for negative images
sliding = []
for y in range(0, image.shape[0], stepSize):
for x in range(0, image.shape[1], stepSize):
sliding.append((x, y, image[y:y + windowSize[1], x:x + windowSize[0]]))
return sliding
def get_hog() :
winSize = (64,128)
blockSize = (16,16)
blockStride = (16,16)
cellSize = (8,8)
nbins = 9
derivAperture = 1
winSigma = 4.
histogramNormType = 0
L2HysThreshold = 0.2
gammaCorrection = 0
nlevels = 64
signedGradient = True
hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,histogramNormType,L2HysThreshold,gammaCorrection,nlevels, signedGradient)
return hog
samples = []
labels = []
sam = []
hog = get_hog()
for filename in os.listdir(positive_path):
img = cv2.imread(os.path.join(positive_path,filename),0) #RGB image
img = cv2.resize(img,(64,128))
img = np.array(img)
hist = hog.compute(img)
hist = cv2.normalize(hist,None)
sam.append(img)
samples.append(hist)
labels.append(1)
i=0
for filename in os.listdir(negative_path):
img = cv2.imread(os.path.join(negative_path,filename),0)
(winW, winH) = (64,128)
pyr = pyramid(img)
for resized in pyr:
sliding = sliding_window(resized, stepSize=32, windowSize=(winW, winH))
for (x, y, window) in sliding:
if window.shape[0] != winH or window.shape[1] != winW:
continue
hist = hog.compute(window)
hist = cv2.normalize(hist,None)
sam.append(window)
samples.append(hist)
labels.append(0)
print(i)
i=i+1
samples = np.array(samples,dtype=np.float32)
labels = np.array(labels,dtype=int)
samples = np.squeeze(samples)
print(len(samples))
print(samples.shape)
rand = np.random.RandomState(10)
shuffle = rand.permutation(len(samples))
sam = samples[shuffle]
samples = sam[shuffle]
labels = labels[shuffle]
svm = cv2.ml.SVM_create()
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(2.67)
svm.setGamma(5.383)
svm_params = dict( kernel_type = cv2.ml.SVM_LINEAR,
svm_type = cv2.ml.SVM_C_SVC,
C=2.67, gamma=5.383 )
svm.train(samples,cv2.ml.ROW_SAMPLE,labels)
print("trained")
res = svm.predict(samples[0]).ravel()
print(res)
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
img=cv2.resize(img,(400,400))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(winW, winH) = (64,128)
pyr = pyramid(img)
for resized in pyr:
sliding = sliding_window(resized, stepSize=32, windowSize=(winW, winH))
for (x, y, window) in sliding:
if window.shape[0] != winH or window.shape[1] != winW:
continue
hist = hog.compute(window)
hist = cv2.normalize(hist,None)
hist = np.reshape(hist,(1,hist.shape[0]))
res = svm.predict(hist)[1].ravel()
if res == 1:
print("found")
cv2.imshow('img',img)
cv2.waitKey(10)
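For reference, the failed assertion samples.cols == var_count && samples.type() == 5 means that predict expects a 2-D CV_32F (float32) matrix with one row per sample and as many columns as the features used for training, so a single feature vector has to be reshaped into a row first. A minimal sketch, assuming samples is the float32 array built above:
single = samples[0].reshape(1, -1)      # shape (1, n_features), dtype float32
res = svm.predict(single)[1].ravel()    # predict returns (retval, results)
print(res)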
I am working on a yawn-detection project. I am using dlib and OpenCV to detect the face and landmarks in a video.
I want to get the length (openness) of the eyes and the mouth.
This is what I have done so far:
import sys
import os
import dlib
import glob
from skimage import io
import cv2
import time
if len(sys.argv) != 3:
print("")
exit()
predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]
vidcap = cv2.VideoCapture('video.avi')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()
while vidcap.isOpened():
success, image = vidcap.read()
if success:
win.clear_overlay()
win.set_image(image)
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(image, 1)
print("Number of faces detected: {}".format(len(dets)))
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
# Get the landmarks/parts for the face in box d.
shape = predictor(image, d)
print(shape)
print("Part 0: {}, Part 1: {},Part 2: {} ...".format(shape.part(0),shape.part(1),shape.part(2)))
# Draw the face landmarks on the screen.
win.add_overlay(shape)
win.add_overlay(dets)
time.sleep(0.01)
cv2.destroyAllWindows()
vidcap.release()
Please help me work out how to get the length of the open eyes and mouth.
Working from the standard 68-point facial landmark figure:
import Paths
import globals
from globals import ClassifierFiles
import numpy as np
import cv2
import time
import dlib
import math
import eyeCoordinates
import mouthCoordinates
from globals import Threshold
from globals import yawnFolder
import os
import openface
VIDEO_PATHS = []
readVideo('v.avi')#test video of faces
def readVideo(video):
global no,yes
video_capture = cv2.VideoCapture(video)
detector = dlib.get_frontal_face_detector() #Face detector
predictor = dlib.shape_predictor(ClassifierFiles.shapePredicter) #Landmark identifier
face_aligner = openface.AlignDlib(ClassifierFiles.shapePredicter)
u = 0
while True:
ret, frame = video_capture.read()
if frame is not None:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# clahe_image = clahe.apply(gray)
detections = detector(frame, 1) #Detect the faces in the image
for k,d in enumerate(detections): #For each detected face
shape = predictor(frame, d) #Get coordinates
vec = np.empty([68, 2], dtype = int)
coor = []
for i in range(1,68): #There are 68 landmark points on each face
#cv2.circle(frame, (shape.part(i).x, shape.part(i).y), 1, (0,0,255), thickness=1)
coor.append([shape.part(i).x, shape.part(i).y])
vec[i][0] = shape.part(i).x
vec[i][1] = shape.part(i).y
#RightEye and LeftEye coordinates
rightEye = eyeCoordinates.distanceRightEye(coor)
leftEye = eyeCoordinates.distanceLeftEye(coor)
eyes = (rightEye + leftEye)/2
#Mouth coordinates
mouth = mouthCoordinates.distanceBetweenMouth(coor)
print(eyes,mouth)
#prints both eyes average distance
#prints mouth distance
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
VIDEO_PATHS = Paths.videosPaths()
init()
eyeCoordinates file:
import distanceFormulaCalculator
def distanceRightEye(c):
eR_36,eR_37,eR_38,eR_39,eR_40,eR_41 = 0,0,0,0,0,0
eR_36 = c[35]
eR_37 = c[36]
eR_38 = c[37]
eR_39 = c[38]
eR_40 = c[39]
eR_41 = c[40]
x1 = distanceFormulaCalculator.distanceFormula(eR_37,eR_41)
x2 = distanceFormulaCalculator.distanceFormula(eR_38,eR_40)
return ((x1+x2)/2)
def distanceLeftEye(c):
eL_42,eL_43,eL_44,eL_45,eL_46,eL_47 = 0,0,0,0,0,0
eL_42 = c[41]
eL_43 = c[42]
eL_44 = c[43]
eL_45 = c[44]
eL_46 = c[45]
eL_47 = c[46]
x1 = distanceFormulaCalculator.distanceFormula(eL_43,eL_47)
x2 = distanceFormulaCalculator.distanceFormula(eL_44,eL_46)
return ((x1+x2)/2)
def eyePoints():
return [36,37,38,39,40,41,42,43,44,45,46,47]
mouthCoordinates file:
import distanceFormulaCalculator
def distanceBetweenMouth(c):
m_60,m_61,m_62,m_63,m_64,m_65,m_66,m_67 = 0,0,0,0,0,0,0,0
m_60 = c[59]
m_61 = c[60]
m_62 = c[61]
m_63 = c[62]
m_64 = c[63]
m_65 = c[64]
m_66 = c[65]
m_67 = c[66]
x1 = distanceFormulaCalculator.distanceFormula(m_61,m_67)
x2 = distanceFormulaCalculator.distanceFormula(m_62,m_66)
x3 = distanceFormulaCalculator.distanceFormula(m_63,m_65)
return ((x1+x2+x3)/3)
def mouthPoints():
return [60,61,62,63,64,65,66,67]
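The distanceFormulaCalculator module is not shown in the answer; presumably it just computes the Euclidean distance between two (x, y) landmark points, along these lines (an assumed implementation, not the original file):
# distanceFormulaCalculator.py (assumed implementation)
import math

def distanceFormula(p1, p2):
    # Euclidean distance between two (x, y) landmark points
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)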
I've tried to add a GUI (tkinter) to my script, but to no avail. If anyone can help me, I would be so grateful. I'm using Python 3.6 and, I think, the latest OpenCV.
I only started programming 2 weeks ago, so I'm kind of new to all this. Basically, I want to create a window that picks an image from my folder and then runs it through the script, so that whenever I want to use another image I don't have to change the script. I hope that makes sense.
This is the script I took from Chris Dahms on YouTube and managed to change to do what I want.
import cv2
import numpy as np
import os
import DetectChars
import DetectPlates
import PossiblePlate
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)  # needed by drawRedRectangleAroundPlate
SCALAR_CYAN = (255.0, 255.0, 0.0)
showSteps = False
def main():
blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN()
if blnKNNTrainingSuccessful == False:
print ("\nerror: KNN training was not successful\n")
return
imgOriginalScene = cv2.imread("CAR/Malaysia/22.jpg")
if imgOriginalScene is None:
print ("\nerror: image not read from file \n\n")
os.system("pause")
return
listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene)
listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates)
cv2.imshow("imgOriginalScene", imgOriginalScene)
if len(listOfPossiblePlates) == 0:
print ("\nno license plates were detected\n")
else:
listOfPossiblePlates.sort(key = lambda possiblePlate: len(possiblePlate.strChars), reverse = True)
licPlate = listOfPossiblePlates[0]
cv2.imshow("Image Plate", licPlate.imgPlate)
cv2.imshow("Image Threshold", licPlate.imgThresh)
if len(licPlate.strChars) == 0:
print ("\nno characters were detected\n\n")
return
drawRedRectangleAroundPlate(imgOriginalScene, licPlate)
print ("\nlicense plate read from image = " + licPlate.strChars + "\n")
print ("----------------------------------------")
writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)
cv2.imshow("imgOriginalScene", imgOriginalScene)
cv2.imwrite("imgOriginalScene.png", imgOriginalScene)
cv2.waitKey(0)
return
def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
p2fRectPoints = cv2.boxPoints(licPlate.rrLocationOfPlateInScene)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]), SCALAR_RED, 2)
def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
ptCenterOfTextAreaX = 0
ptCenterOfTextAreaY = 0
ptLowerLeftTextOriginX = 0
ptLowerLeftTextOriginY = 0
sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape
intFontFace = cv2.FONT_HERSHEY_SIMPLEX
fltFontScale = float(plateHeight) / 30.0
intFontThickness = int(round(fltFontScale * 2))
textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness)
( (intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg ) = licPlate.rrLocationOfPlateInScene
intPlateCenterX = int(intPlateCenterX)
intPlateCenterY = int(intPlateCenterY)
ptCenterOfTextAreaX = int(intPlateCenterX)
if intPlateCenterY < (sceneHeight * 0.75):
ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6))
else:
ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6))
textSizeWidth, textSizeHeight = textSize
ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2))
ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2))
cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_CYAN, intFontThickness)
if __name__ == "__main__":
main()
cv2.waitKey()
cv2.destroyAllWindows()
Pre-processing stage
# Preprocess.py
import cv2
import numpy as np
import math
# module level variables ##########################################################################
GAUSSIAN_SMOOTH_FILTER_SIZE = (5, 5)
ADAPTIVE_THRESH_BLOCK_SIZE = 19
ADAPTIVE_THRESH_WEIGHT = 9
def preprocess(imgOriginal):
imgGrayscale = extractValue(imgOriginal)
imgMaxContrastGrayscale = maximizeContrast(imgGrayscale)
height, width = imgGrayscale.shape
grayscaled = cv2.cvtColor(imgOriginal,cv2.COLOR_BGR2GRAY)
imgBlurred = np.zeros((height, width, 1), np.uint8)
imgBlurred, otsu = cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
imgThresh = cv2.medianBlur(otsu,5)
return imgGrayscale, imgThresh
# end function
def extractValue(imgOriginal):
height, width, numChannels = imgOriginal.shape
imgHSV = np.zeros((height, width, 3), np.uint8)
imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)
imgHue, imgSaturation, imgValue = cv2.split(imgHSV)
return imgValue
# end function
def maximizeContrast(imgGrayscale):
height, width = imgGrayscale.shape
imgTopHat = np.zeros((height, width, 1), np.uint8)
imgBlackHat = np.zeros((height, width, 1), np.uint8)
structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement)
imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement)
imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)
imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)
return imgGrayscalePlusTopHatMinusBlackHat
# end function
If all you want is a window to select a file, then this should work.
from tkinter import *
from tkinter.filedialog import askopenfilename
master = Tk()
master.withdraw()
my_file = askopenfilename()
mainloop()
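The path returned by askopenfilename can then be fed to the plate-detection script in place of the hard-coded filename, for example (a sketch, assuming main() is adjusted to read the chosen file):
import cv2
from tkinter import Tk
from tkinter.filedialog import askopenfilename

root = Tk()
root.withdraw()   # no main window, just the file dialog
my_file = askopenfilename(filetypes=[("Image files", "*.jpg *.jpeg *.png *.bmp")])
root.destroy()

# use the chosen file instead of cv2.imread("CAR/Malaysia/22.jpg")
imgOriginalScene = cv2.imread(my_file)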
I recommend Gtk3 for your GUI.
Here's a simple Gtk window with a button:
#!/usr/bin/env python3
import gi
gi.require_version( 'Gtk', '3.0' )
from gi.repository import Gtk
class Window( Gtk.Window ):
def __init__( self ):
Gtk.Window.__init__( self )
self.connect( 'destroy', lambda q: Gtk.main_quit() )
button = Gtk.Button( "Gtk.Button" )
button.connect( "clicked", self.on_button_clicked )
grid = Gtk.Grid( )
grid.attach( button, 0, 0, 1, 1 )
self.add( grid )
self.show_all()
def on_button_clicked( self, button ):
print( "Gtk.Button was clicked" )
w = Window()
Gtk.main()
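To let the user actually pick an image, the button handler could open a Gtk.FileChooserDialog; a rough sketch (my own addition, replacing on_button_clicked above):
def on_button_clicked( self, button ):
    dialog = Gtk.FileChooserDialog( title="Choose an image", parent=self,
                                    action=Gtk.FileChooserAction.OPEN )
    dialog.add_buttons( Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK )
    if dialog.run() == Gtk.ResponseType.OK:
        print( "Selected:", dialog.get_filename() )   # hand this path to the OpenCV code
    dialog.destroy()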