import cv2
import numpy as np
from io import BytesIO
from PIL import Image
from ppadb.client import Client as AdbClient


def get_current_screen():
    """Return the most recently captured screenshot (raw image bytes)."""
    return current_screen


def capture_current_screen():
    """Capture a fresh screenshot from the device and cache it globally.

    Returns:
        The raw screenshot bytes as produced by ``device.screencap()``.
    """
    global current_screen
    current_screen = device.screencap()
    return current_screen


def find_center(x1, y1, x2, y2):
    """Return the (rounded) center point of the rectangle (x1, y1)-(x2, y2)."""
    centerX = round(x1 + (x2 - x1) / 2)
    centerY = round(y1 + (y2 - y1) / 2)
    return centerX, centerY


def tap(x, y=None):
    """Send a tap to the device.

    Args:
        x: Either an int x-coordinate (in which case ``y`` must also be an
           int), or a pre-formatted ``"x y"`` location string.
        y: The y-coordinate when ``x`` is an int; ignored otherwise.

    Raises:
        ValueError: If ``x`` is an int but ``y`` is not.
        TypeError: If ``x`` is neither an int nor a string.
    """
    if isinstance(x, int):
        if not isinstance(y, int):
            raise ValueError("y must be an int when x is an int")
        location = f"{x} {y}"
    elif isinstance(x, str):
        location = x
    else:
        raise TypeError("x must be either an int or a string")

    action = f"input tap {location}"
    print(action)
    device.shell(action)


def tap_button(template):
    """Find ``template`` on the current screen and tap its first match.

    Silently does nothing when the template is not found.
    """
    button = find_template(template)
    if len(button) == 0:
        return
    # Matches are sorted by y, so index 0 is the topmost occurrence.
    # Coordinates may be numpy ints, so pass them as a location string.
    tap(f"{button[0][0]} {button[0][1]}")


def swipe(start, end, duration=1000):
    """Swipe from ``start`` to ``end`` ("x y" strings) over ``duration`` ms."""
    action = f"input swipe {start} {end} {duration}"
    print(action)
    device.shell(action)


def look_for_templates(templates):
    """Search the current screen for the first matching template.

    Args:
        templates: Mapping of name -> template image (BGR numpy array).

    Returns:
        ``(name, locations)`` for the first template with at least one match,
        or ``(None, None)`` when nothing matches.
    """
    for name, template in templates.items():
        locations = find_template(template)
        if len(locations) > 0:
            return name, locations
    return None, None


def find_template(template_image):
    """Locate all occurrences of ``template_image`` on the current screen.

    Args:
        template_image: Template as a BGR numpy array (as loaded by cv2).

    Returns:
        List of (x, y) center coordinates of each match, sorted by y
        ascending. Empty list when there is no match above the threshold.
    """
    # Screenshots are PNG and frequently RGBA; force 3-channel RGB so the
    # RGB2BGR conversion below is valid (a 4-channel input would break it).
    target_image = Image.open(BytesIO(get_current_screen())).convert("RGB")

    # Convert to a NumPy array in BGR channel order (what OpenCV expects).
    target_image = np.array(target_image)
    target_image = cv2.cvtColor(target_image, cv2.COLOR_RGB2BGR)

    h, w = template_image.shape[:-1]

    # Template matching.
    result = cv2.matchTemplate(target_image, template_image, cv2.TM_CCOEFF_NORMED)

    # Minimum normalized correlation to count as a match; tune as needed.
    threshold = 0.9

    # All top-left corners where the match exceeds the threshold.
    locations = np.where(result >= threshold)
    locations = list(zip(*locations[::-1]))

    # Expand each corner into a full bounding rectangle.
    rectangles = [(*loc, loc[0] + w, loc[1] + h) for loc in locations]

    # Collapse overlapping detections of the same on-screen element.
    rectangles = non_max_suppression(rectangles, 0.3)

    coordinates = []
    for startX, startY, endX, endY in rectangles:
        coordinates.append(find_center(startX, startY, endX, endY))

    # Sort matches top-to-bottom.
    return sorted(coordinates, key=lambda x: x[1])


def non_max_suppression(boxes, overlapThresh):
    """Apply non-maximum suppression to a list of bounding boxes.

    Args:
        boxes: Sequence of (x1, y1, x2, y2) rectangles.
        overlapThresh: Overlap ratio above which a box is suppressed.

    Returns:
        Integer numpy array of the retained boxes (empty list if no input).
    """
    if len(boxes) == 0:
        return []

    # Work in float to avoid integer division artifacts below.
    boxes = np.array(boxes, dtype="float")

    pick = []

    # Coordinates of the bounding boxes.
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    # Box areas; process boxes in order of bottom-right y-coordinate.
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)

    while len(idxs) > 0:
        # Pick the box with the largest bottom y still in the list.
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        # Intersection of the picked box with every remaining box.
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])

        # Width/height of the intersection (0 when boxes don't overlap).
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)

        # Ratio of intersection area to each remaining box's area.
        overlap = (w * h) / area[idxs[:last]]

        # Drop the picked box plus everything overlapping it too much.
        idxs = np.delete(
            idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0]))
        )

    return boxes[pick].astype("int")


# Module-level setup: connect to the local adb server, attach to the
# device, and take an initial screenshot so get_current_screen() works.
client = AdbClient(host="127.0.0.1", port=5037)
device = client.device("192.168.178.32:5555")
current_screen = capture_current_screen()