Related to 1. getting the same pixel values in an entire column, and 2. frame resolution #9

Open
Sourab1801 opened this issue May 23, 2022 · 1 comment


Sourab1801 commented May 23, 2022

Hello @KonstantinosAng,

(screenshot attached: output matrix)

I am using the Kinect/PyKinect2 library to connect my Kinect v2 to Python. I can get the depth image and do some processing in OpenCV, but when I print the captured frame matrix it shows the same value down every column, so I am not able to detect any object placed in front of my Kinect sensor. I am attaching a screenshot of the matrix output.
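
To make the first issue concrete, here is a stripped-down sketch of the path the frame takes before I print it (simplified from the full script below; print_depth_matrix is only a name for this sketch, not part of my script, and frame is whatever get_last_depth_frame() returns, which as far as I understand is a flat 1-D array):

import cv2
import numpy as np

def print_depth_matrix(frame):
    # frame is the flat array returned by get_last_depth_frame()
    f7 = np.uint8(frame.clip(1, 4000) / 16.)   # scale the 16-bit depth values down to 8 bits
    frame8bit = np.dstack((f7, f7, f7))        # three identical channels, shape (1, N, 3)
    small = cv2.resize(frame8bit, (20, 20))    # shrink before printing
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    print(gray)                                # this is where I see the same value down every column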

My second issue is about the frame resolution. With Kinect/PyKinect2 I am not able to reduce the resolution properly: if I ask for 15x15, only the height becomes 15 while the width stays 120. Above 120x120 the resize works properly, but below 120x120 only the height decreases and the width remains 120.
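
To show what I expect from the resize step on its own, here is a minimal sketch (depth_2d is only a random stand-in at the Kinect v2 depth frame size of 512 x 424; note that the dsize argument of cv2.resize is given as (width, height)):

import cv2
import numpy as np

# Random stand-in for one depth frame at the Kinect v2 depth resolution of 512 x 424
depth_2d = np.random.randint(0, 4000, size=(424, 512), dtype=np.uint16)
depth_8bit = np.uint8(depth_2d.clip(1, 4000) / 16.)

small = cv2.resize(depth_8bit, (15, 15))   # dsize is (width, height)
print(small.shape)                         # I would expect (15, 15) here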

My code:

import ctypes
import _ctypes

import cv2
import numpy as np
import pygame
from PIL import Image

from pykinect2 import PyKinectV2
from pykinect2 import PyKinectRuntime

MARGIN = 10
WIDTH = 10
HEIGHT = 10

mog = cv2.createBackgroundSubtractorMOG2(history=2000, varThreshold=16, detectShadows=True)


class DepthRuntime(object):

    def __init__(self):  # constructor: initialize the data members of the class
        pygame.init()  # initialize all imported pygame modules

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object; we want only depth frames
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth)

        # Back buffer surface for the Kinect depth frames, width and height equal to the Kinect depth frame size
        self._frame_surface = pygame.Surface(
            (self._kinect.depth_frame_desc.Width, self._kinect.depth_frame_desc.Height), 0, 24)

        # Here we will store skeleton data
        self._bodies = None

        # Set the width and height of the screen [width, height]
        self._screen = pygame.display.set_mode((120, 120),
                                               pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)
        self._infoObject = pygame.display.Info()
        print(self._infoObject)

        pygame.display.set_caption("Kinect for Windows v2 Depth")

    def draw_Depth_newframe(self, frame, target_surface):
        if frame is None:  # some USB hubs do not provide the depth image; it works with Kinect Studio though
            return
        target_surface.lock()
        f7 = np.uint8(frame.clip(1, 4000) / 16.)  # clip to 1..4000 mm and scale down to 8-bit (max 250)
        frame8bit = np.dstack((f7, f7, f7))  # set all three channels to the same value
        address = self._kinect.surface_as_array(target_surface.get_buffer())
        ctypes.memmove(address, frame8bit.ctypes.data, frame8bit.size)  # copy the frame data into the surface buffer
        # frame = cv2.cvtColor(frame8bit, cv2.COLOR_GRAY2RGB)
        # cv2.imshow('KINECT Video Stream', frame)
        frame8bit = frame8bit.astype(np.uint8)
        frame8bit = cv2.resize(frame8bit, (20, 20))
        new_Arr = cv2.cvtColor(frame8bit, cv2.COLOR_BGR2GRAY)
        print(new_Arr)
        fgmask = mog.apply(new_Arr)
        # threshold the foreground mask and clean it up by dilating with an elliptical kernel
        fgthres = cv2.threshold(fgmask.copy(), 128, 255, cv2.THRESH_BINARY)[1]
        fgdilated = cv2.dilate(fgthres, kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=3)
        bgmodel = mog.getBackgroundImage()
        cv2.imshow("bgmodel", bgmodel)
        cv2.imshow("fgdilated", fgdilated)
        cv2.imshow("fgthres", fgthres)
        cv2.imshow("new_Arr", new_Arr)
        cv2.waitKey(1)  # give the OpenCV windows a chance to refresh

        # print(new_Arr.ndim)
        # print(new_Arr.size)
        # print(new_Arr.shape)

        # frame = frame.astype(np.uint8)
        # reshaped_arr = np.reshape(frame8bit, (1, 651264))
        del address
        target_surface.unlock()

    def run(self):
        # -------- Main Program Loop -----------
        while not self._done:
            # --- Main event loop
            for event in pygame.event.get():  # User did something
                if event.type == pygame.QUIT:  # If user clicked close
                    self._done = True  # Flag that we are done so we exit this loop

                elif event.type == pygame.VIDEORESIZE:  # window resized
                    self._screen = pygame.display.set_mode((0, 0),
                                                           pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

            # --- Getting frames and drawing
            if self._kinect.has_new_depth_frame():
                frame = self._kinect.get_last_depth_frame()
                self.draw_Depth_newframe(frame, self._frame_surface)
                frame = None

                self._screen.blit(self._frame_surface, [0, 0])
                pygame.display.update()

            # --- Go ahead and update the screen with what we've drawn.
            pygame.display.flip()

            # --- Limit the frame rate
            self._clock.tick(1)

        # Close the Kinect sensor, close the window and quit.
        self._kinect.close()
        pygame.quit()


__main__ = "Kinect v2 Depth"
game = DepthRuntime()
game.run()

Please help me with these issues.
Thank you.

@KonstantinosAng (Owner) commented

I am sorry, mate. I do not have a Kinect any more, but as far as I can see you are not using my library, so it would be better to post this question here:

https://github.com/Kinect/PyKinect2

It is the official Python Kinect v2 repo.
