Issue
I am working on a Raspberry Pi, and I have connected my Pi camera and tested it — it runs correctly. But when I run my application, it gives me the following error.
ERROR:
Opening ASL Model!
/home/pi/Desktop/ASL/ASLClassifier.dat
SVM Loaded successfully..
mmal: mmal_vc_port_enable: failed to enable port vc.null_sink:in:0(OPQV): ENOSPC
mmal: mmal_port_enable: failed to enable connected port (vc.null_sink:in:0(OPQV))0x6a221df0 (ENOSPC)
mmal: mmal_connection_enable: output port couldn't be enabled
[2021-05-10 12:02:57,924] ERROR in app: Exception on /camera [POST]
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python3/dist-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python3/dist-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python3/dist-packages/flask/_compat.py", line 35, in reraise
raise value
File "/usr/lib/python3/dist-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python3/dist-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "app.py", line 122, in camera
cap=PiCamera(0)
File "/usr/lib/python3/dist-packages/picamera/camera.py", line 433, in __init__
self._init_preview()
File "/usr/lib/python3/dist-packages/picamera/camera.py", line 513, in _init_preview
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
File "/usr/lib/python3/dist-packages/picamera/renderers.py", line 558, in __init__
self.renderer.inputs[0].connect(source).enable()
File "/usr/lib/python3/dist-packages/picamera/mmalobj.py", line 2212, in enable
prefix="Failed to enable connection")
File "/usr/lib/python3/dist-packages/picamera/exc.py", line 184, in mmal_check
raise PiCameraMMALError(status, prefix)
picamera.exc.PiCameraMMALError: Failed to enable connection: Out of resources
127.0.0.1 - - [10/May/2021 12:02:57] "POST /camera HTTP/1.1" 500 -
CAMERA ROUTE
# Camera route
@app.route("/camera", methods=['POST'])
def camera():
    """Run ASL gesture recognition on the Pi camera's live feed.

    Loads the trained SVM classifier (training a fresh one if the saved
    model cannot be loaded), then streams frames from the Pi camera,
    segments the hand by skin colour in YCrCb space, classifies the
    largest contour, and accumulates recognised letters into a sentence.
    The loop ends when the user presses 'q' in the preview window.

    Returns:
        The rendered "index.html" template.
    """
    print("Opening ASL Model!")
    try:
        direct = cwd + "/ASLClassifier.dat"
        print(direct)
        model = st.load(direct)
        print("SVM Loaded successfully..")
    except Exception:
        # Saved model missing or unreadable — retrain from scratch.
        model = st.trainSVM(17)

    # BUG FIX: the original code opened the camera three times in a row
    # (cv2.VideoCapture(0), PiCamera(0), then PiCamera()).  The Pi camera
    # is an exclusive MMAL resource; opening it more than once exhausts
    # the GPU-side ports and raises
    # "PiCameraMMALError: Failed to enable connection: Out of resources"
    # (ENOSPC) — exactly the traceback above.  Open it exactly once.
    picam = PiCamera()
    picam.resolution = (640, 480)
    picam.framerate = 32
    rawCapture = PiRGBArray(picam, size=(640, 480))
    time.sleep(0.1)  # give the sensor time to warm up
    rgbFrame = PiRGBArray(picam, size=picam.resolution)
    frame1 = captureProcessFrame(picam, rgbFrame, 5)

    frameCount = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    text = " "            # accumulated sentence
    temp = 0              # how many consecutive frames showed the same label
    previouslabel = None  # label seen on the previous frame
    previousText = " "    # previous sentence (kept for parity with original)
    label = None          # label for the current frame

    for frame in picam.capture_continuous(rawCapture, format="bgr",
                                          use_video_port=True):
        stream = frame.array       # frame as a numpy array
        rawCapture.truncate(0)     # reset the buffer for the next frame
        img = stream
        # Green rectangle marking the region of interest on screen.
        cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 0)
        img1 = img[100:300, 100:300]  # crop the region of interest
        # Skin segmentation in YCrCb colour space.
        img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCR_CB)
        blur = cv2.GaussianBlur(img_ycrcb, (11, 11), 0)
        skin_ycrcb_min = np.array((0, 138, 67))     # skin-tone lower bound
        skin_ycrcb_max = np.array((255, 173, 133))  # skin-tone upper bound
        mask = cv2.inRange(blur, skin_ycrcb_min, skin_ycrcb_max)
        imgres, contours, hierarchy = cv2.findContours(
            mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnt = ut.getMaxContour(contours, 4000)  # largest contour above 4000 px

        if cnt is not None:  # a hand-sized contour was found
            gesture, label = ut.getGestureImg(cnt, img1, mask, model)
            if label is not None:
                if temp == 0:
                    previouslabel = label
                elif previouslabel == label:
                    # Same gesture held — count the consecutive frames.
                    previouslabel = label
                    temp += 1
                else:
                    temp = 0
                if temp == 40:  # gesture held steadily for 40 frames
                    if label == 'P':
                        label = " "  # 'P' inserts a word break
                    text = text + label
                    if label == 'Q':
                        # 'Q' removes the last word from the sentence.
                        words = re.split(" +", text)
                        words.pop()
                        text = " ".join(words)
                        print(text)
            cv2.putText(img, label, (50, 150), font, 8, (0, 125, 155), 2)

        cv2.putText(img, text, (50, 450), font, 3, (0, 0, 255), 2)
        cv2.imshow('Frame', img)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            cv2.destroyAllWindows()
            picam.close()  # release the camera so the route can run again
            break
    return render_template("index.html")
My Pi camera is enabled and no other application is using it, but I am still getting this error. Kindly help me — above is the Flask API code which I am using to trigger the Pi camera.
Solution
The problem was with my Pi camera: I hadn't configured it properly. After configuring it, I updated my firmware using:
sudo apt update && sudo apt full-upgrade
Answered By - Abdullah Mujahid Answer Checked By - Cary Denson (WPSolving Admin)