
Common problems and code

Getting the local IP address in Python

(screenshot: img.png)
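The code itself is in the screenshot; as a stand-in, here is a minimal sketch of the usual trick: "connect" a UDP socket to a public address (8.8.8.8 is just an example, no packets are actually sent) and read back the local address the OS picked.

import socket


def get_local_ip() -> str:
    # Connecting a UDP socket sends nothing; it only makes the OS choose an
    # outbound interface, whose address getsockname() then reports.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()


if __name__ == "__main__":
    print(get_local_ip())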

Perspective transform

(screenshot: img_1.png)
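The code is in the screenshot; a minimal OpenCV sketch follows, with made-up file names and corner coordinates. cv2.getPerspectiveTransform computes the 3x3 matrix that maps four source points to four destination points, and cv2.warpPerspective applies it.

import cv2
import numpy

img = cv2.imread("input.jpg")  # hypothetical file name

# Four corners of the region of interest in the source image
# (top-left, top-right, bottom-right, bottom-left) ...
src_pts = numpy.float32([[100, 100], [500, 120], [520, 400], [90, 380]])
# ... and where those corners should land in the output image
dst_pts = numpy.float32([[0, 0], [400, 0], [400, 300], [0, 300]])

M = cv2.getPerspectiveTransform(src_pts, dst_pts)  # 3x3 perspective matrix
warped = cv2.warpPerspective(img, M, (400, 300))   # (width, height) of the output

cv2.imwrite("warped.jpg", warped)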

Fixing the OpenCV error AttributeError: module 'cv2.cv2' has no attribute 'xfeature2d'

(screenshot: img_2.png)
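The fix in the screenshot is not reproduced here. The usual cause is that the contrib modules are missing (and note the attribute is spelled xfeatures2d, with an s): install opencv-contrib-python instead of plain opencv-python, or, on OpenCV >= 4.4, use cv2.SIFT_create directly since SIFT moved into the main module. A hedged sketch:

# pip install opencv-contrib-python
import cv2

if hasattr(cv2, "SIFT_create"):
    # OpenCV >= 4.4 ships SIFT in the main module (the patent expired)
    sift = cv2.SIFT_create()
else:
    # Older contrib builds keep it in the extra-modules namespace
    sift = cv2.xfeatures2d.SIFT_create()

img = cv2.imread("input.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical file name
keypoints, descriptors = sift.detectAndCompute(img, None)
print(len(keypoints), "keypoints")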

Fixing the Nginx WebSocket proxy error: WebSocket failed: Error during WebSocket handshake: Unexpected response code: 400

Add the following to the matching location block in the Nginx configuration:

proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

WebSocket connections insist on HTTP/1.1, which Nginx does not use for proxied requests by default; hand it over (together with the Upgrade headers) and the handshake goes through, for example:
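A hypothetical complete location block for context; the /ws/ path and the upstream address 127.0.0.1:8000 are placeholders for your own service:

location /ws/ {
    proxy_pass http://127.0.0.1:8000;
    # WebSocket needs HTTP/1.1 plus the Upgrade/Connection handshake headers
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
}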

Adjusting image saturation and brightness in Python

(screenshot: img_3.png)

Processing a single 1920*1080 image takes roughly 90 ms, which is still a bit slow; I would rather use ffmpeg's -filter_complex "hue=s=2" and be done in one step. ffmpeg really is powerful.
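The screenshot's code is not copied here; below is a minimal sketch of one common OpenCV approach, assuming BGR input: convert to HSV, scale the S (saturation) and V (brightness) channels, and convert back. The factors 1.5 and 1.1 and the file names are made-up example values.

import cv2
import numpy


def adjust_saturation_brightness(img, sat_scale=1.5, val_scale=1.1):
    # Work in HSV so saturation (S) and brightness (V) can be scaled directly.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(numpy.float32)
    hsv[:, :, 1] = numpy.clip(hsv[:, :, 1] * sat_scale, 0, 255)
    hsv[:, :, 2] = numpy.clip(hsv[:, :, 2] * val_scale, 0, 255)
    return cv2.cvtColor(hsv.astype(numpy.uint8), cv2.COLOR_HSV2BGR)


if __name__ == "__main__":
    frame = cv2.imread("input.jpg")  # hypothetical file name
    cv2.imwrite("adjusted.jpg", adjust_saturation_brightness(frame))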

Applying filters to images with a LUT in Python

Complete code:

import datetime
import os
import pickle

import cv2
import numpy


def contrast_and_brightness(img, alpha, beta):
    """
    调整图片的对比度和亮度
    https://www.codeleading.com/article/5833955990/
    :param img:
    :param alpha: 对比度
    :param beta: 亮度
    :return:
    """
    blank = numpy.zeros(img.shape, img.dtype)
    dst = cv2.addWeighted(img, alpha, blank, 1 - alpha, beta)
    return dst


def getBGR(img, table, i, j):
    """Per-pixel LUT lookup (slow reference version; the vectorized numpy code below is what is actually used)."""
    # Color of the source pixel
    b, g, r = img[i][j]
    # Position of that color in the 512*512 LUT image (8x8 grid of 64x64 tiles)
    x = int(g / 4 + int(b / 32) * 64)
    y = int(r / 4 + int((b % 32) / 4) * 64)
    # Return the corresponding color from the graded LUT
    return table[x][y]


class MYLUT:
    def __init__(self, lutpath):
        # Load a 512*512 LUT image (an 8x8 grid of 64x64 tiles, one tile per
        # blue level) and expand it into a full 256*256*256 lookup table.
        lut = cv2.imread(lutpath)
        cube64rows = 8
        cube64size = 64
        # cube256rows = 16
        cube256size = 256
        cubescale = int(cube256size / cube64size)  # 4
        reshapelut = numpy.zeros((cube256size, cube256size, cube256size, 3))
        for i in range(cube64size):
            # Column and row offsets of the i-th 64x64 tile in the 8x8 grid
            cx = (i % cube64rows) * cube64size
            cy = (i // cube64rows) * cube64size
            cube64 = lut[cy:cy + cube64size, cx:cx + cube64size]
            _rows, _cols, _ = cube64.shape
            if _rows == 0 or _cols == 0:
                continue
            cube256 = cv2.resize(cube64, (cube256size, cube256size))
            base = i * cubescale
            for k in range(cubescale):
                reshapelut[base + k] = cube256
        self.lut = reshapelut

    def imageInLut(self, src):
        arr = src.copy()
        bs = arr[:, :, 0]
        gs = arr[:, :, 1]
        rs = arr[:, :, 2]
        arr[:, :] = self.lut[bs, gs, rs]  # the more vectorized the numpy, the faster it runs
        return arr


if __name__ == '__main__':
    v = cv2.VideoCapture(r"C:\Users\wojia\Desktop\2020-12-24_12-50-17.mp4")
    with open(r"C:\Users\wojia\OneDrive - buaa\1\口红", "rb") as f:
        color_map = pickle.load(f)
    while True:
        ret, frame = v.read()
        if not ret:
            break
        s = datetime.datetime.now()
        dst = numpy.zeros((frame.shape[0], frame.shape[1], 3), dtype=numpy.uint8)

        bs = frame[:, :, 0]
        gs = frame[:, :, 1]
        rs = frame[:, :, 2]


        dst[:, :] = color_map[rs, gs, bs]  # the more vectorized the numpy, the faster it runs
        print((datetime.datetime.now() - s).total_seconds() * 1000)
        cv2.namedWindow("result", cv2.WINDOW_KEEPRATIO)
        cv2.imshow('result', numpy.hstack((frame, dst)))
        cv2.waitKey(1)
    exit()  # everything below is the earlier still-image experiment and the one-off color_map precompute
    # Read the source photo and the 512*512 LUT image
    img = cv2.imread(r"C:\Users\wojia\OneDrive - buaa\1\1.jpg")
    lj_map = cv2.imread(r"C:\Users\wojia\OneDrive - buaa\1\yujiannan.png")
    # color_map = numpy.ones((256, 256, 256, 3))
    # Get the image rows and columns
    rows, cols = img.shape[:2]

    # Create the destination image

    with open(r"C:\Users\wojia\OneDrive - buaa\1\口红", "rb") as f:
        color_map = pickle.load(f)
    dst = numpy.zeros((rows, cols, 3), dtype="uint8")
    # One-off precompute of the full 256**3 color_map from the LUT image
    # (takes about 30 s); pickle it once and just load it afterwards:
    # for r in range(256):
    #     for g in range(256):
    #         for b in range(256):
    #             x = int(g / 4 + int(b / 32) * 64)
    #             y = int(r / 4 + int((b % 32) / 4) * 64)
    #             color_map[r][g][b] =  lj_map[x][y]
    #     print(r)
    # with open(r"C:\Users\wojia\OneDrive - buaa\1\口红", "wb") as f:
    #     pickle.dump(color_map, f)
    # exit()
    bs = img[:, :, 0]
    gs = img[:, :, 1]
    rs = img[:, :, 2]

    dst = color_map[rs, gs, bs]  # the more vectorized the numpy, the faster it runs

    # Show the original and filtered images side by side
    cv2.namedWindow("result", cv2.WINDOW_KEEPRATIO)
    cv2.imshow('result', numpy.hstack((img, dst)))
    cv2.waitKey(0)
    cv2.destroyAllWindows()

LUT source image: 1.png. 1.png is the original image, 512*512 (it must be). Color-grade this image, then, for any given picture, replace each pixel with the color found at the corresponding position in the graded table, and the picture gets the same filter effect.

(screenshot: img_4.png)

First record the mapping for every color into color_map; building it takes thirty-odd seconds, so pickle color_map once and later runs can simply load it, which is fast.

(screenshot: img_5.png)

Then simply batch-replace the pixels with numpy for each 1920*1080 frame; complete code:

(screenshot: img_6.png)

If you use ffmpeg instead, the LUT image has to be converted into the format ffmpeg expects (a Hald CLUT); the command is:

ffmpeg -i /home/senseport0/Workspace/HighVenueServer/temp/乐活竹山路/1号场地/2020-12-24_12-50-17.mp4 -i /home/senseport0/Workspace/HighVenueServer/static/image/lehuozhushanlu.png -i /home/senseport0/Workspace/HighVenueServer/static/image/logo.png -i /home/senseport0/Workspace/HighVenueServer/temp/乐活竹山路/1号场地/yujiannan.png -filter_complex '[0][3]haldclut[new_video];[1]scale=w=350:h=94[logo1];[2]scale=w=250:h=64[logo2];[new_video][logo1]overlay=main_w-overlay_w-30:main_h-overlay_h-30[image];[image][logo2]overlay=main_w-overlay_w-30:30' -i /home/senseport0/Workspace/HighVenueServer/static/music/ExtremeSport-极限运动_Version_A-Dr_爱给网_aigei_com.mp3 -vol 40 -af apad -b 1M -shortest -c:v h264_nvenc -y /home/senseport0/Workspace/HighVenueServer/scripts/output/process_video/2020-12-17_17-06-36_乐活竹山路_1号场地.mp4
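That command bundles the LUT, two logo overlays, and background music; stripped down to just the LUT step it reduces to the haldclut filter with two inputs. The file names below are placeholders, and lut.png must already be a Hald CLUT image in the format ffmpeg expects; add your own encoder flags (e.g. -c:v h264_nvenc) as needed:

ffmpeg -i input.mp4 -i lut.png -filter_complex '[0][1]haldclut' output.mp4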

Installing Python with apt

sudo apt update
sudo apt install software-properties-common -y
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt install python3.8 -y
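
To confirm the interpreter is available afterwards:

python3.8 --version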

Installing Poetry

curl -sSL https://install.python-poetry.org | python3 -
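
The official installer normally places the poetry executable in ~/.local/bin (check the installer's output if yours differs); if the command is not found afterwards, add that directory to PATH and verify:

export PATH="$HOME/.local/bin:$PATH"
poetry --version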
