Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DETR模型fp16精度正常,量化精度几乎为0 #176

Open
blueWatermelonFri opened this issue Oct 12, 2024 · 5 comments
Open

DETR模型fp16精度正常,量化精度几乎为0 #176

blueWatermelonFri opened this issue Oct 12, 2024 · 5 comments

Comments

@blueWatermelonFri
Copy link

blueWatermelonFri commented Oct 12, 2024

最近在量化DETR模型,在模拟器上fp16精度的推理在coco数据集上的map正常,但是int8推理mAP为0,请问有人知道为什么吗,这个bug折磨了我两礼拜了。

虽然mAP是0,但是推理的bbox和logits不为0

我的理解是int8和fp16推理用的是同一套前后处理代码,所以前后处理肯定没有问题,唯一存在的问题可能在rknn config里面,但是里面能改动的只有mean和std,但是mean和std不管改成[0,0,0],[1,1,1]或者[0,0,0],[255,255,255]结果都是0。

但是我在tensorrt上做量化mAP也是趋近于0,这是因为transformer类模型难以做量化吗?但是难以量化也不至于精度为0?

我的onnx模型文件:https://drive.google.com/file/d/1citGq4HegghVSniAC6nMpZtMFKrUScaC/view?usp=sharing

rknntoolkit版本是2.0。
代码如下:

# Description: rknn
## fp16模型导出
## 模型int8导出
## 模型评估


import sys
import os
import argparse
import json
import onnxruntime
import numpy as np
import cv2
import torch
from PIL import Image

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tqdm import tqdm
import onnx_deploy_benchmark_eval

import ctypes
import logging
# Module-level logger; not referenced anywhere else in this file.
logger = logging.getLogger(__name__)
# NOTE(review): this globally overrides the ctypes signature of CPython's
# PyCapsule_GetPointer. Nothing in this file uses it directly — presumably a
# workaround for some dependency; confirm it is still needed before keeping it.
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_char_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]

sys.path.append('../../third_party/yolov5')    # to run '$ python *.py' files in subdirectories
sys.path.append('../tool')    # to run '$ python *.py' files in subdirectories
sys.path.append('./')    # to run '$ python *.py' files in subdirectories

from visualizer import Visualizer


from rknn.api import RKNN

from list_files_to_txt import list_files_to_txt
from list_files_to_txt import read_file_to_list


def parse():
    """Parse command-line arguments for RKNN export / quantization / evaluation.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_model_path', type=str, required=True, help='input_model_path')
    parser.add_argument('--output_model_path', type=str, required=True, help='output_model_path')
    parser.add_argument('--platform', type=str, default="rk3588", help='platform[rk3562,rk3566,rk3568,rk3588,rk1808,rv1109,rv1126]')
    # Selects quantized (int8, with calibration) vs. non-quantized (fp16/fp32) build.
    parser.add_argument('--dtype', type=str, default="int8", help='dtype choose from [int8, fp16] for [rk3562,rk3566,rk3568,rk3588]; dtype choose from [int8, fp32] for [rk1808,rv1109,rv1126]')
    parser.add_argument('--calibrate_data_path', type=str, default="", help='calibrate_data_path')
    # Fixed copy-paste bug: help text previously said 'calibrate_dataset'.
    parser.add_argument('--size_limit', type=int, default=128, help='max number of images listed for calibration')
    parser.add_argument('--calibrate_dataset', type=str, default="calibrate_dataset.txt", help='calibrate_dataset')
    parser.add_argument('--quantized_algorithm', type=str, default="kl_divergence", help='normal/kl_divergence/mmse')
    # Whether to run per-layer accuracy analysis on the accuracy dataset.
    parser.add_argument('--accuracy', action='store_true', help='accuracy')
    parser.add_argument('--accuracy_dataset', type=str, default="calibrate_dataset.txt", help='accuracy_dataset')
    # Fixed copy-paste bug: help text previously said 'accuracy_dataset'.
    parser.add_argument('--accuracy_output_dir', type=str, default="", help='accuracy_output_dir')
    # Whether to evaluate the model (COCO mAP) on the simulator.
    parser.add_argument('--eval', action='store_true', help='eval')
    parser.add_argument('--eval_results_file', type=str, default='', help='eval_results_file')
    # Evaluation images directory and COCO annotation file.
    parser.add_argument('--eval_dataset', type=str, default="/root/data/datasets/new_dataset/rename/images/calib_test", help='eval dataset')
    parser.add_argument('--annotations', type=str, default="/root/data/datasets/new_dataset/rename/annotations/calib_test_1.json", help='eval annotations')

    args = parser.parse_args()
    return args


def getJpgByAnnotations(annotations):
    """Return the ``images`` entries of a COCO-format annotation file.

    Args:
        annotations (str): path to a COCO annotation JSON file.

    Returns:
        list[dict]: the ``images`` list; the caller reads ``id`` and
        ``file_name`` from each entry.
    """
    # Bug fix: this function was accidentally defined twice, verbatim;
    # keep a single definition.
    with open(annotations, 'r') as file:
        data = json.load(file)
        return data["images"]

import torchvision.transforms as T

# DETR-style preprocessing: resize to a fixed 800x800, convert to a float
# tensor in [0, 1], then normalize with ImageNet mean/std.
# NOTE(review): normalization happens here in Python, while rknn.config uses
# mean 0 / std 1 — so the RKNN graph receives already-normalized floats. The
# calibration images listed in the dataset txt must go through the exact same
# preprocessing, otherwise int8 quantization ranges will be computed on raw
# pixels and accuracy will collapse — TODO confirm.
transform = T.Compose([
    T.Resize((800,800)),  # PIL.Image.BILINEAR
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2).
def box_cxcywh_to_xyxy(x):
    """Convert a NumPy (N, 4) array of center-format boxes to corner format.

    Returns a torch tensor of shape (N, 4) with (x1, y1, x2, y2) columns.
    """
    t = torch.from_numpy(x)
    cx, cy, w, h = t.unbind(1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
    return torch.stack(corners, dim=1)

def convert_to_xywh(boxes):
    """Convert (N, 4) corner-format boxes (x1, y1, x2, y2) to COCO (x, y, w, h)."""
    top_left = boxes[:, 0:2]
    sizes = boxes[:, 2:4] - top_left
    return np.concatenate((top_left, sizes), axis=1)

# Map normalized [0, 1] boxes onto absolute image pixel coordinates.
def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to xyxy pixel coords for *size* = (w, h)."""
    img_w, img_h = size
    scale = np.array([img_w, img_h, img_w, img_h], dtype=np.float32)
    xyxy = box_cxcywh_to_xyxy(out_bbox).cpu().numpy()
    return xyxy * scale


def convert(args):
    """Build an RKNN model from ONNX and optionally quantize, analyze accuracy,
    and evaluate COCO mAP on the simulator.

    Args:
        args: namespace from ``parse()`` — paths, dtype, calibration and
            evaluation options.
    """
    # int8 -> quantized build (needs calibration dataset); fp16 -> float build.
    is_quant = True
    if args.dtype == "int8":
        is_quant = True
    elif args.dtype == "fp16":
        is_quant = False

    # Create RKNN object
    rknn = RKNN(verbose=False)
    # Pre-process config
    print('--> Config model')
    # RGB2BGR conversion is applied first, then mean_values / std_values.
    # NOTE(review): target_platform is hard-coded to 'rk3588' in both branches,
    # silently ignoring args.platform — confirm this is intended.
    # NOTE(review): mean 0 / std 1 means the graph expects inputs already
    # normalized like `transform`; the calibration images must be preprocessed
    # identically or int8 ranges will be wrong — likely cause of mAP ~ 0.
    if is_quant:   
        rknn.config(mean_values=[[0, 0, 0]], 
                     std_values=[[1, 1, 1]], 
                    quantized_algorithm=args.quantized_algorithm, 
                    target_platform='rk3588') 
        print("args.calibrate_dataset:", args.calibrate_dataset)
    else:
        rknn.config(mean_values=[[0, 0, 0]], std_values=[[1, 1, 1]], target_platform='rk3588')

    # Regenerate the calibration file list if a source directory was given.
    if args.calibrate_data_path != "":
        list_files_to_txt(args.calibrate_data_path, args.calibrate_dataset, args.size_limit)

    # Load model
    print('--> Loading model')
    ret = rknn.load_onnx(model=args.input_model_path)
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=is_quant, dataset=args.calibrate_dataset)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export rknn model
    # NOTE(review): export is disabled — args.output_model_path is unused.
    # print('--> Export rknn model')
    # ret = rknn.export_rknn(args.output_model_path)
    # if ret != 0:
    #     print('Export rknn model failed!')
    #     exit(ret)
    # print('done')

    # Optional per-layer accuracy (similarity) analysis on the simulator.
    if args.accuracy:
        print('--> Accuracy analysis')
        file_to_list = read_file_to_list(args.accuracy_dataset)
        ret = rknn.accuracy_analysis(inputs = file_to_list,
                                      output_dir='./snapshot',
                                      target=None)

        if ret != 0:
            print('Accuracy analysis failed!')
            exit(ret)
        print('done')

    # Optional COCO mAP evaluation using the simulator runtime.
    if args.eval:
        # Init runtime environment
        print('--> Init runtime environment')
        # target=None -> simulator; pass target=args.platform to run on device.
        # ret = rknn.init_runtime(target=args.platform)
        ret = rknn.init_runtime()
        if ret != 0:
            print('Init runtime environment failed!')
            exit(ret)
        print('done')

        from pycocotools.coco import COCO

        results=[]
        jpgs = getJpgByAnnotations(args.annotations)

        for jpg in tqdm(jpgs):
            image_id = jpg["id"]
            path = os.path.join(args.eval_dataset, jpg["file_name"])
            im = Image.open(path).convert("RGB")

            # Preprocess exactly as at ONNX export time (resize + normalize),
            # add batch dim, convert to NumPy for rknn.inference.
            img = transform(im).unsqueeze(0).cpu().numpy()

            scores, boxes = rknn.inference(inputs=[img], data_format=['nchw'])

            # DETR head: softmax over classes, drop the trailing "no object"
            # class, keep queries whose best class prob exceeds the threshold.
            probas = torch.from_numpy(np.array(scores)).softmax(-1)[0, :, :-1]
            keep = probas.max(-1).values > 0.001
            probas = probas.cpu().detach().numpy()
            keep = keep.cpu().detach().numpy()
            bboxes_scaled = rescale_bboxes(boxes[0, keep], im.size)

            # COCO results expect xywh boxes.
            bboxes_scaled = convert_to_xywh(bboxes_scaled)

            for i in range(len(bboxes_scaled)):
                cl = probas[keep][i].argmax()
                results.append({'image_id': image_id,
                                'category_id': cl,
                                'bbox': bboxes_scaled[i],
                                'score': probas[keep][i][cl]} )

        if len(results) != 0:
            cocoGt = COCO(args.annotations) # ground truth: boxes + category ids
            cocoDt = cocoGt.loadRes(results) # detections produced above
            imgIds = sorted(cocoGt.getImgIds())
            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
        else:
            print("no result!")

    # Release
    rknn.release()

if __name__ == "__main__":
    # Script entry point: parse CLI options, then run the convert pipeline.
    cli_args = parse()
    print("rknn convert start!")
    convert(cli_args)
    print("rknn convert end!")
@yuyun2000
Copy link

有没有跑模型精度分析接口看一下逐层相似度?

@blueWatermelonFri
Copy link
Author

模拟器上看过,逐层相似度几乎没有损失

@yuyun2000
Copy link

yuyun2000 commented Oct 12, 2024

这么神奇的事情? 那怀疑过后处理吗?

@EvW1998
Copy link

EvW1998 commented Oct 17, 2024

模拟器上看过,逐层相似度几乎没有损失

你好,请问你的rknn-toolkit2用的什么版本?另外onnx的opset是什么版本的?

@blueWatermelonFri
Copy link
Author

模拟器上看过,逐层相似度几乎没有损失

你好,请问你的rknn-toolkit2用的什么版本?另外onnx的opset是什么版本的?

版本是2.0.0b0+9bab5682,opset是12,但是高版本的也试了,没用

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

3 participants