Переглянути джерело

Move shared screenshot/ADB utilities out of screenshot.py into helper.py

Richard Köhl 1 рік тому
батько
коміт
a0f45eabb0
2 змінених файлів з 172 додано та 82 видалено
  1. 155 0
      helper.py
  2. 17 82
      screenshot.py

+ 155 - 0
helper.py

@@ -0,0 +1,155 @@
+import cv2
+import numpy as np
+from io import BytesIO
+from PIL import Image
+from ppadb.client import Client as AdbClient
+
+
def get_current_screen():
    """Return the most recently captured screenshot (raw screencap bytes).

    Reads the module-level ``current_screen`` populated by
    capture_current_screen(); does not talk to the device itself.
    """
    return current_screen
+
+
def capture_current_screen():
    """Take a fresh screenshot over ADB and cache it module-wide.

    Updates the module-level ``current_screen`` so that subsequent
    get_current_screen()/find_template() calls reuse the same frame.

    Returns:
        The raw bytes produced by ``device.screencap()``.
    """
    global current_screen
    current_screen = device.screencap()

    return current_screen
+
+
def find_center(x1, y1, x2, y2):
    """Return the integer (x, y) midpoint of the rectangle (x1, y1)-(x2, y2).

    Coordinates are rounded with Python's built-in round() (banker's
    rounding), matching the previous behaviour exactly.
    """
    # x1 + (x2 - x1) / 2 simplifies to the plain midpoint (x1 + x2) / 2;
    # snake_case locals dropped entirely in favour of a direct return.
    return round((x1 + x2) / 2), round((y1 + y2) / 2)
+
+
def tap(x, y=None):
    """Send an ``input tap`` shell command to the device.

    Accepts either two int coordinates, or a single pre-formatted
    ``"x y"`` string as *x*.

    Raises:
        ValueError: *x* is an int but *y* is not.
        TypeError: *x* is neither an int nor a string.
    """
    if isinstance(x, str):
        # Caller already supplied a formatted "x y" location string.
        location = x
    elif isinstance(x, int):
        if not isinstance(y, int):
            raise ValueError("y must be an int when x is an int")
        location = f"{x} {y}"
    else:
        raise TypeError("x must be either an int or a string")

    cmd = f"input tap {location}"
    print(cmd)
    device.shell(cmd)
+
+
def tap_button(template):
    """Search the current screen for *template* and tap its topmost match.

    No-op when the template is not found on screen.
    """
    matches = find_template(template)
    if not matches:
        return
    x, y = matches[0]
    tap(f"{x} {y}")
+
+
def swipe(start, end, duration=1000):
    """Issue an ``input swipe`` from *start* to *end* over *duration* ms.

    *start* and *end* are pre-formatted ``"x y"`` coordinate strings,
    matching the format tap() builds.
    """
    action = f"input swipe {start} {end} {duration}"
    print(action)
    device.shell(action)
+
+
def look_for_templates(templates):
    """Scan the current screen for each named template in *templates*.

    Args:
        templates: mapping of name -> template image.

    Returns:
        ``(name, locations)`` for the first template with at least one
        match, or ``(None, None)`` when none of them are found.
    """
    for label, image in templates.items():
        hits = find_template(image)
        if hits:
            return label, hits

    return None, None
+
+
def find_template(template_image):
    """Find all occurrences of *template_image* in the cached screenshot.

    Runs OpenCV normalized cross-correlation matching against the bytes
    from get_current_screen() (call capture_current_screen() first to
    refresh the cache).

    Returns:
        A list of (centerX, centerY) match centers sorted by y ascending;
        empty list when nothing scores above the threshold.
    """
    target_image = Image.open(BytesIO(get_current_screen()))

    # Convert the image to a NumPy array and then to BGR format (which OpenCV uses)
    target_image = np.array(target_image)
    target_image = cv2.cvtColor(target_image, cv2.COLOR_RGB2BGR)

    # OpenCV arrays are (rows, cols, channels) == (height, width, channels).
    h, w = template_image.shape[:-1]

    # Template matching
    result = cv2.matchTemplate(target_image, template_image, cv2.TM_CCOEFF_NORMED)

    # Define a threshold
    threshold = 0.9  # Adjust this threshold based on your requirements

    # Finding all locations where match exceeds threshold
    # np.where yields (row, col) index arrays; reversing gives (x, y) pairs.
    locations = np.where(result >= threshold)
    locations = list(zip(*locations[::-1]))

    # Create list of rectangles
    rectangles = [(*loc, loc[0] + w, loc[1] + h) for loc in locations]

    # Apply non-maximum suppression to remove overlaps
    rectangles = non_max_suppression(rectangles, 0.3)

    # Initialize an empty list to store coordinates
    coordinates = []

    for startX, startY, endX, endY in rectangles:
        # Append the coordinate pair to the list
        coordinates.append(find_center(startX, startY, endX, endY))

    # Sort the coordinates by y value in ascending order
    return sorted(coordinates, key=lambda x: x[1])
+
+
def non_max_suppression(boxes, overlapThresh):
    """Suppress overlapping boxes, keeping one representative per cluster.

    Args:
        boxes: sequence of (x1, y1, x2, y2) rectangles.
        overlapThresh: boxes overlapping a kept box by more than this
            ratio (relative to their own area) are discarded.

    Returns:
        The surviving boxes as an int ndarray, or ``[]`` for empty input.
    """
    if len(boxes) == 0:
        return []

    rects = np.array(boxes, dtype="float")

    x1, y1 = rects[:, 0], rects[:, 1]
    x2, y2 = rects[:, 2], rects[:, 3]

    # Box areas in inclusive pixel coordinates (hence the +1).
    area = (x2 - x1 + 1) * (y2 - y1 + 1)

    # Visit candidates ordered by their bottom-right y-coordinate,
    # always taking the last (largest y2) remaining one.
    order = np.argsort(y2)
    keep = []

    while order.size > 0:
        i = order[-1]
        keep.append(i)
        rest = order[:-1]

        # Intersection of box i with every remaining candidate
        # (clamped to zero when they do not overlap at all).
        iw = np.maximum(0, np.minimum(x2[i], x2[rest]) - np.maximum(x1[i], x1[rest]) + 1)
        ih = np.maximum(0, np.minimum(y2[i], y2[rest]) - np.maximum(y1[i], y1[rest]) + 1)

        # Overlap ratio relative to each remaining box's own area.
        overlap = (iw * ih) / area[rest]

        # Drop candidates that overlap box i beyond the threshold.
        order = rest[overlap <= overlapThresh]

    # Return only the bounding boxes that were picked.
    return rects[keep].astype("int")
+
+
# Connect to the local ADB server and the target device (hard-coded LAN address).
client = AdbClient(host="127.0.0.1", port=5037)
device = client.device("192.168.178.32:5555")

# Prime the screenshot cache so get_current_screen() works immediately.
# NOTE(review): this performs device I/O at import time — confirm intended.
current_screen = capture_current_screen()

+ 17 - 82
screenshot.py

@@ -1,15 +1,16 @@
-import cv2
 import time
 import datetime
 import numpy as np
 import io
-from PIL import Image
-from io import BytesIO
 from ppadb.client import Client as AdbClient
 from PIL import Image
-
-client = AdbClient(host="127.0.0.1", port=5037)
-device = client.device("192.168.178.32:5555")
+from helper import (
+    cv2,
+    find_template,
+    tap as tap_helper,
+    swipe as swipe_helper,
+    capture_current_screen,
+)
 
 # templates
 close_sub_fights = cv2.imread("templates/close_sub_fights.jpg")
@@ -78,31 +79,9 @@ def non_max_suppression(boxes, overlapThresh):
     return boxes[pick].astype("int")
 
 
-def screen_has_changed(prev_screenshot, threshold=0.01):
-    # Take a new screenshot
-    current_screenshot = device.screencap()
-
-    # Convert to NumPy arrays
-    prev_img = np.frombuffer(prev_screenshot, dtype=np.uint8)
-    current_img = np.frombuffer(current_screenshot, dtype=np.uint8)
-
-    # Load images
-    prev_img = cv2.imdecode(prev_img, cv2.IMREAD_COLOR)
-    current_img = cv2.imdecode(current_img, cv2.IMREAD_COLOR)
-
-    # Calculate absolute difference
-    diff = cv2.absdiff(prev_img, current_img)
-    non_zero_count = np.count_nonzero(diff)
-
-    # print(f"diff: {non_zero_count} > {threshold * diff.size} = {non_zero_count > threshold * diff.size}")
-
-    # Check if the difference is greater than the threshold
-    return non_zero_count > threshold * diff.size
-
-
 def save_screenshot():
     # Take a screenshot
-    result = device.screencap()
+    result = capture_current_screen()
 
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     image = Image.open(io.BytesIO(result))
@@ -111,25 +90,17 @@ def save_screenshot():
     image = image.convert("RGB")  # Convert to RGB mode for JPEG
     with open(jpeg_filename, "wb") as fp:
         image.save(fp, format="JPEG", quality=85)  # Adjust quality as needed
-
+    print(f"snap: {jpeg_filename}")
     time.sleep(0.5)
 
 
-def wait_for_screen_change():
-    # Usage example
-    prev_screenshot = device.screencap()
-
-    while not screen_has_changed(prev_screenshot):
-        time.sleep(0.1)  # Polling interval
-
-
 def tap(location):
-    device.shell(f"input tap {location}")
+    tap_helper(location)
     time.sleep(1)
 
 
-def swipe(start, end):
-    device.shell(f"input swipe {start} {end} 1000")
+def swipe(start, end, duration=1000):
+    swipe_helper(start, end, duration)
     time.sleep(0.5)
 
 
@@ -153,47 +124,6 @@ def is_end_of_log():
     return result
 
 
-def find_templates(template_image):
-    screenshot = device.screencap()
-    target_image = Image.open(BytesIO(screenshot))
-
-    # Convert the image to a NumPy array and then to BGR format (which OpenCV uses)
-    target_image = np.array(target_image)
-    target_image = cv2.cvtColor(target_image, cv2.COLOR_RGB2BGR)
-
-    w, h = template_image.shape[:-1]
-
-    # Template matching
-    result = cv2.matchTemplate(target_image, template_image, cv2.TM_CCOEFF_NORMED)
-
-    # Define a threshold
-    threshold = 0.9  # Adjust this threshold based on your requirements
-
-    # Finding all locations where match exceeds threshold
-    locations = np.where(result >= threshold)
-    locations = list(zip(*locations[::-1]))
-
-    # Create list of rectangles
-    rectangles = [(*loc, loc[0] + w, loc[1] + h) for loc in locations]
-
-    # Apply non-maximum suppression to remove overlaps
-    rectangles = non_max_suppression(rectangles, 0.3)
-
-    # Initialize an empty list to store coordinates
-    coordinates = []
-
-    for startX, startY, endX, endY in rectangles:
-        # Calculate the center coordinates
-        centerX = round(startX + (endX - startX) / 2)
-        centerY = round(startY + (endY - startY) / 2)
-
-        # Append the coordinate pair to the list
-        coordinates.append((centerX, centerY))
-
-    # Sort the coordinates by y value in ascending order
-    return sorted(coordinates, key=lambda x: x[1])
-
-
 def find_max_y_pair(coordinates):
     # find the coordinate pair with the maximum y value
     result = max(coordinates, key=lambda x: x[1])
@@ -212,6 +142,11 @@ def take_fight_screenshots():
     time.sleep(1)
 
 
def find_templates(template):
    """Refresh the cached screenshot, then search it for *template*.

    Thin wrapper over helper.capture_current_screen() and
    helper.find_template(); returns that helper's sorted center list.
    """
    capture_current_screen()
    return find_template(template)
+
+
 def process_war_log():
     buttons = find_templates(fight_button)