#!/usr/bin/env python
"""qr.py
Usage example:
python qr.py -o out.yaml -p qrcodes/detection
-H, --help - show help
-o, --output - output file (default out.yaml)
-p, --path - input dataset path (default qrcodes/detection)
-a, --accuracy - input accuracy (default 20 pixels)
-alg, --algorithm - input alg (default opencv)
--metric - input norm (default l_inf)
"""
import argparse
import glob
from enum import Enum
import time
import numpy as np
from numpy import linalg as LA
import cv2 as cv
class DetectorQR:
TypeDetector = Enum('TypeDetector', 'opencv opencv_wechat')
def __init__(self):
self.detected_corners = np.array([])
self.decoded_info = []
self.detector = None
class CvObjDetector(DetectorQR):
def __init__(self):
super().__init__()
self.detector = cv.QRCodeDetector()
def detect(self, image, use_alignment=True):
_, decoded_info, corners, _ = self.detector.detectAndDecodeMulti(image)
if corners is None or len(corners) == 0:
return False, np.array([])
self.decoded_info = decoded_info
self.detected_corners = corners
return True, corners
def detect_aruco(self, image):
self.detector.detectMultiAruco(image)
def decode(self, image, use_alignment=True):
if use_alignment:
if len(self.decoded_info) == 0:
return 0, [], None
return True, self.decoded_info, self.detected_corners
else:
if self.detected_corners.size == 0:
return 0, [], None
r, decoded_info, straight_qrcode = self.detector.decodeMulti(image, self.detected_corners)
self.decoded_info = decoded_info
return r, decoded_info, straight_qrcode
class CvWechatDetector(DetectorQR):
def __init__(self, path_to_model="./"):
super().__init__()
self.detector = cv.wechat_qrcode_WeChatQRCode(path_to_model + "detect.prototxt",
path_to_model + "detect.caffemodel",
path_to_model + "sr.prototxt",
path_to_model + "sr.caffemodel")
def detect(self, image):
decoded_info, corners = self.detector.detectAndDecode(image)
if len(decoded_info) == 0:
return False, np.array([])
corners = np.array(corners).reshape(-1, 4, 2)
self.decoded_info = decoded_info
self.detected_corners = corners
return True, corners
def decode(self, image):
if len(self.decoded_info) == 0:
return 0, [], None
return True, self.decoded_info, self.detected_corners
def create_instance_qr(type_detector=DetectorQR.TypeDetector.opencv, path_to_model="./"):
if type_detector is DetectorQR.TypeDetector.opencv:
return CvObjDetector()
if type_detector is DetectorQR.TypeDetector.opencv_wechat:
return CvWechatDetector(path_to_model)
raise TypeError("this type_detector isn't supported")
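# Minimal sketch of how the detector classes above are meant to be driven. It is not
# called by the benchmark; the function name and the image path argument are purely
# illustrative.
def demo_detect_and_decode(image_path, algorithm="opencv", model_path="./"):
    qr = create_instance_qr(DetectorQR.TypeDetector[algorithm], model_path)
    image = cv.imread(image_path, cv.IMREAD_IGNORE_ORIENTATION)
    found, corners = qr.detect(image)
    if not found:
        return []
    _, decoded_info, _ = qr.decode(image)
    # pair every decoded string with its 4x2 corner array
    return list(zip(decoded_info, corners))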
def find_images_path(dir_path):
images = glob.glob(dir_path + '/*.jpg')
images += glob.glob(dir_path + '/*.png')
return images
def get_gold_corners(label_path):
    corners = []
    with open(label_path, "r") as f:
        for line in f.readlines():
            try:
                f_list = [float(i) for i in line.split(" ")]
                corners += f_list
            except ValueError:
                # skip lines that do not parse as whitespace-separated floats
                pass
    return np.array(corners).reshape(-1, 4, 2)
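# Illustrative label line (an assumption based on the reshape above: whitespace-
# separated floats, 8 values per QR code, i.e. x, y of each of the 4 corners):
#   12.0 34.0 120.0 36.0 118.0 140.0 10.0 138.0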
# l_1 - https://en.wikipedia.org/wiki/Norm_(mathematics)
# l_inf - Chebyshev norm https://en.wikipedia.org/wiki/Chebyshev_distance
TypeNorm = Enum('TypeNorm', 'l1 l2 l_inf')
def get_norm(gold_corners, corners, type_dist):
if type_dist is TypeNorm.l1:
return LA.norm((gold_corners - corners).flatten(), 1)
if type_dist is TypeNorm.l2:
return LA.norm((gold_corners - corners).flatten(), 2)
if type_dist is TypeNorm.l_inf:
return LA.norm((gold_corners - corners).flatten(), np.inf)
raise TypeError("this TypeNorm isn't supported")
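# Worked example (not executed): for a corner residual gold_corners - corners of
# [[3, -4], [0, 0], [0, 0], [0, 0]] the flattened vector is (3, -4, 0, 0, 0, 0, 0, 0), so
#   l1    -> |3| + |-4|        = 7.0
#   l2    -> sqrt(3^2 + 4^2)   = 5.0
#   l_inf -> max(|3|, |-4|)    = 4.0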
def get_norm_to_rotate_qr(gold_corner, corners, accuracy, type_dist=TypeNorm.l_inf):
    corners = corners.reshape(-1, 4, 2)
    dist = 1e9
    for one_corners in corners:
        # The detector may report the 4 corners starting from any vertex and in either
        # winding order, so compare the gold quad with all 4 cyclic shifts of the
        # detected quad and with all 4 shifts of its reversed order, keeping the minimum.
        dist = min(dist, get_norm(gold_corner, one_corners, type_dist))
        for i in range(0, 3):
            one_corners = np.roll(one_corners, 1, 0)
            dist = min(dist, get_norm(gold_corner, one_corners, type_dist))
        one_corners = np.flip(one_corners, 0)
        dist = min(dist, get_norm(gold_corner, one_corners, type_dist))
        for i in range(0, 3):
            one_corners = np.roll(one_corners, 1, 0)
            dist = min(dist, get_norm(gold_corner, one_corners, type_dist))
    return dist
def read_output(path):
fs = cv.FileStorage(path, cv.FILE_STORAGE_READ)
root = fs.root()
for image in root.keys():
if image.split('_')[0] == "img":
image_category = image.split("_")[-2]
image_info = root.getNode(image)
corners = image_info.getNode("corners").mat()
decoded_info = image_info.getNode("decoded_info")
if not decoded_info.empty():
for i in range(decoded_info.size()):
# print(decoded_info.at(i).string())
pass
def main():
# parse command line options
parser = argparse.ArgumentParser(description="bench QR code dataset", add_help=False)
parser.add_argument("-H", "--help", help="show help", action="store_true", dest="show_help")
parser.add_argument("-o", "--output", help="output file", default="out.yaml", action="store", dest="output")
parser.add_argument("-p", "--path", help="input dataset path", default="qrcodes/detection", action="store",
dest="dataset_path")
parser.add_argument("-m", "--model", help="path to opencv_wechat model (detect.prototxt, detect.caffemodel,"
"sr.prototxt, sr.caffemodel), build opencv+contrib to get model",
default="./", action="store",
dest="model_path")
parser.add_argument("-a", "--accuracy", help="input accuracy", default="20", action="store", dest="accuracy",
type=int)
parser.add_argument("-alg", "--algorithm", help="QR detect algorithm", default="opencv", action="store",
dest="algorithm", choices=['opencv', 'opencv_wechat'], type=str)
parser.add_argument("--metric", help="Metric for distance between QR corners", default="l2", action="store",
dest="metric", choices=['l1', 'l2', 'l_inf'], type=str)
args = parser.parse_args()
show_help = args.show_help
if show_help:
parser.print_help()
return
output = args.output
dataset_path = args.dataset_path
model_path = args.model_path
accuracy = args.accuracy
algorithm = args.algorithm
metric = TypeNorm.l_inf
if args.metric == "l1":
metric = TypeNorm.l1
elif args.metric == "l2":
metric = TypeNorm.l2
list_dirs = glob.glob(dataset_path + "/*")
fs = cv.FileStorage(output, cv.FILE_STORAGE_WRITE)
detect_dict = {}
decode_dict = {}
fs.write("dataset_path", dataset_path)
gl_count = 0
gl_detect = 0
gl_decode = 0
gl_dist = 0
gl_pos_dist = 0
qr = create_instance_qr(DetectorQR.TypeDetector[algorithm], model_path)
for dir in list_dirs:
imgs_path = find_images_path(dir)
qr_count = 0
qr_detect = 0
qr_decode = 0
loc_dist = 0
loc_pos_dist = 0
for img_path in imgs_path:
label_path = img_path[:-3] + "txt"
gold_corners = get_gold_corners(label_path)
qr_count += gold_corners.shape[0]
image = cv.imread(img_path, cv.IMREAD_IGNORE_ORIENTATION)
ret, corners = qr.detect(image)
img_name = img_path[:-4].replace('\\', '_')
img_name = "img_" + img_name.replace('/', '_')
fs.startWriteStruct(img_name, cv.FILE_NODE_MAP)
fs.write("bool", int(ret))
fs.write("gold_corners", gold_corners)
fs.write("corners", corners)
            if ret is True:
                i = 0
                r, decoded_info, straight_qrcode = qr.decode(image)
                decoded_corners = []
                if len(decoded_info) > 0:
                    for info in decoded_info:
                        if info != "":
                            qr_decode += 1
                    # collect the detected corners index-aligned with decoded_info for
                    # the position-error statistics below
                    for j in range(corners.shape[0]):
                        decoded_corners.append(corners[j])
                decoded_corners = np.array(decoded_corners)
                fs.write("decoded_info", decoded_info)
                for one_gold_corners in gold_corners:
                    dist = get_norm_to_rotate_qr(one_gold_corners, corners, accuracy, metric)
                    fs.write("dist_to_gold_corner_" + str(i), dist)
                    if dist <= accuracy:
                        qr_detect += 1
                    i += 1
                    gl_dist += dist
                    loc_dist += dist
                for decode_corner, info in zip(decoded_corners, decoded_info):
                    if info != "":
                        dist = get_norm_to_rotate_qr(decode_corner, gold_corners, accuracy, metric)
                        gl_pos_dist += dist
                        loc_pos_dist += dist
fs.endWriteStruct()
category = (dir.replace('\\', '_')).replace('/', '_').split('_')[-1]
detect_dict[category] = {"nums": qr_count, "detected": qr_detect, "detected_prop": qr_detect / max(1, qr_count)}
decode_dict[category] = {"nums": qr_count, "decoded": qr_decode, "decoded_prop": qr_decode / max(1, qr_count)}
print(dir, qr_detect / max(1, qr_count), qr_decode / max(1, qr_count), qr_count)
print("loc_dist", loc_dist / max(1, qr_count), " loc_pos_dist", loc_pos_dist / max(1, qr_decode))
gl_count += qr_count
gl_detect += qr_detect
gl_decode += qr_decode
print(gl_count)
print(gl_detect)
print(gl_decode)
print("gl_dist", gl_dist / max(1, gl_count), "gl_pos_dist", gl_pos_dist / max(1, gl_decode))
print("detect", gl_detect / max(1, gl_count))
print("decode", gl_decode / max(1, gl_count))
detect_dict["total"] = {"nums": gl_count, "detected": gl_detect, "detected_prop": gl_detect / max(1, gl_count)}
fs.startWriteStruct("category_detected", cv.FILE_NODE_MAP)
for category in detect_dict:
fs.startWriteStruct(category, cv.FILE_NODE_MAP)
fs.write("nums", detect_dict[category]["nums"])
fs.write("detected", detect_dict[category]["detected"])
fs.write("detected_prop", detect_dict[category]["detected_prop"])
fs.endWriteStruct()
fs.endWriteStruct()
decode_dict["total"] = {"nums": gl_count, "decoded": gl_decode, "decoded_prop": gl_decode / max(1, gl_count)}
fs.startWriteStruct("category_decoded", cv.FILE_NODE_MAP)
for category in decode_dict:
fs.startWriteStruct(category, cv.FILE_NODE_MAP)
fs.write("nums", decode_dict[category]["nums"])
fs.write("decoded", decode_dict[category]["decoded"])
fs.write("decoded_prop", decode_dict[category]["decoded_prop"])
fs.endWriteStruct()
fs.endWriteStruct()
if __name__ == '__main__':
start = time.time()
main()
print(time.time() - start)