MQTT: publishing a message when a face is detected - Python

I'm working on a university project combining MQTT and face recognition, using a Raspberry Pi.
First of all, I want to say that face recognition works normally when run on its own, but something bugs me when combining it with MQTT.
Inside the try section, when a person is recognized (or unknown) it publishes just fine every 3 seconds.
The strange thing is that if I put my hand in front of the camera, or if I get out of range, it keeps on sending the ID of the last person it recognized.
I want it to either do nothing or publish None.
Any idea or suggestion to fix this?
Thanks in advance.
EDIT 1: As I figured out, it has more to do with the face recognition part. When using just face recognition code with OpenCV it runs smoothly. But things get complicated when I put my hand in front of the camera or walk away, because the capture freezes at the last thing it saw, so it keeps on printing/publishing the same thing. I haven't figured out a way to avoid this yet. Any help would be appreciated.
import cv2
import json
import time
import datetime as dt
import paho.mqtt.client as mqtt

detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()

broker = "*****"
port = *****
username = '*****'
password = '*****'

def on_connect(client, userdata, flags, rc):
    print("Connected with code: ", rc)
    client.subscribe('test/#')

def on_message(client, userdata, msg):
    print(str(msg.payload))

client = mqtt.Client("Python1")
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(username, password)
client.connect(broker, port, 60)
client.loop_start()
time.sleep(1)

def facerecognizer():
    recognizer.read("trainerdir/training.yml")
    font = cv2.FONT_HERSHEY_SIMPLEX

    # Loading data
    with open('data.json', 'r') as f:
        names = json.load(f)

    # reverse the data
    # NOTE: for k, v !!
    # else it raises an error!
    names = {v: k for k, v in names.items()}
    # print(names)

    print("[INFO] Face recognition is starting..")
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)

    try:
        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.detectMultiScale(gray,
                                              scaleFactor=1.3,
                                              minNeighbors=5
                                              # minSize=(20, 20)
                                              )
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                ID, confidence = recognizer.predict(roi_gray)
                if confidence < 100:
                    ID = names[ID]
                    confidence = "{}%".format(round(100 - confidence))
                    while True:
                        client.publish("Tutorial/", ID + " " + str(dt.datetime.now()))
                        print('ID sent')
                        time.sleep(3)
                    else:
                        client.publish("Tutorial/", 'None')
                        print('ID sent')
                        time.sleep(3)
                    client.loop_forever()
                else:
                    ID = "Unknown"
                    confidence = "{}%".format(round(100 - confidence))
                    print(ID, dt.datetime.now())
                    while True:
                        client.publish("Tutorial/", ID + " " + str(dt.datetime.now()))
                        print('ID sent')
                        time.sleep(3)
                    else:
                        client.publish("Tutorial/", 'None')
                        print('ID sent')
                        time.sleep(3)
                    client.loop_forever()
    # except UnboundLocalError:
    #     print("Error occurred. Exiting..")
    except KeyboardInterrupt:
        pass
    except KeyError as K:
        print(K)
        print('[INFO] Name value is a string and not an integer')

    print("[INFO] Exiting program..")
    cap.release()

From the documentation on MQTT clients, you seem to be misusing the client.loop_forever() method.
http://www.steves-internet-guide.com/loop-python-mqtt-client/
TL;DR:
client.loop(timeout), called inside your own loop, processes updates on demand.
client.loop_start() starts a new thread that handles updates for you.
client.loop_forever() blocks and processes updates.
Since you have already called client.loop_start(), you don't need to call loop_forever(). Removing those calls from your code should resolve the issue.
Side note: your time.sleep(3) call could be moved to the end of the loop, since it should happen no matter which conditional path is taken.
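For illustration, a minimal sketch of that pattern (broker address, port, topic and payload are placeholders): call loop_start() once after connecting, then publish from your own loop.
import time
import paho.mqtt.client as mqtt

client = mqtt.Client("Python1")
client.connect("broker.example.com", 1883, 60)  # placeholder broker and port
client.loop_start()  # background thread now services the network traffic

try:
    while True:
        # publish() can be called directly from the main thread;
        # the loop_start() thread handles delivery and keepalives
        client.publish("Tutorial/", "payload")
        time.sleep(3)
except KeyboardInterrupt:
    client.loop_stop()
    client.disconnect()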

By using if True: instead of while True: I fixed it. I also removed the subclient.loop_forever() call and it works fine.
Here is the part of the code that differs from what I posted in my question:
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    roi_gray = gray[y:y + h, x:x + w]
    # roi_color = img[y:y + h, x:x + w]
    ID, confidence = recognizer.predict(roi_gray)
    # 0 is 100% accuracy, and 100 is 0% accuracy
    if confidence < 100:
        ID = names[ID]
        if True:
            subclient.publish("project/IDReceiver", ID + " " + str(dt.datetime.now()))
            print('ID sent')
            time.sleep(3)
        # subclient.loop_forever()
    else:
        ID = "Unknown"
        if True:
            subclient.publish("project/IDReceiver", "Unknown " + str(dt.datetime.now()))
            print('ID sent')
            time.sleep(3)
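To also cover the original goal (publishing None when nobody is recognizable, e.g. a hand over the lens), one untested option, reusing the names from the code above, is to branch on whether detectMultiScale found any faces at all:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

if len(faces) == 0:
    # nothing detected in this frame: report None instead of the last ID
    subclient.publish("project/IDReceiver", "None " + str(dt.datetime.now()))
    time.sleep(3)
else:
    for (x, y, w, h) in faces:
        # ... recognition and publishing as above
        pass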

Related

pickle error with multiprocessing and cv2

I'm trying to implement code that takes a video and divides it into frames, while also filtering the frames and saving them to a different directory.
I am running into an error that says:
TypeError: cannot pickle 'cv2.VideoCapture' object
I have tried to understand why this problem occurs, but I'm still unsure why.
Here is my code:
import cv2
import os
import time
import matplotlib.image as pltim
from multiprocessing import Process, Lock
import matplotlib.pyplot as plt

def saveFramesUnfiltered(vid, lock):
    currentFrame = 0  # counter for frames (to organize them by order)
    while True:
        success, frame = vid.read()
        lock.acquire()
        cv2.imwrite("./framesBefore/frame" + str(currentFrame) + '.jpg',
                    frame)  # save unfiltered frame to folder and show the video (using the frames)
        lock.release()
        currentFrame = currentFrame + 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break  # end loop when finished
        time.sleep(0.01)

def saveFramesFiltered(lock):
    currentFrame = 0
    framemax = 215
    while currentFrame < framemax:
        if os.path.exists("framesBefore/frame" + str(currentFrame) + '.jpg'):
            lock.acquire()
            image = pltim.imread("./framesBefore/frame" + str(currentFrame) + '.jpg')
            lock.release()
            r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
            grayImage = 0.299 * r + 0.587 * g + 0.114 * b
            plt.plot(grayImage, cmap="gray")
            plt.axis("off")
            lock.acquire()
            plt.savefig("./framesAfter/grayImage" + str(currentFrame) + ".jpg", bbox_inches='tight', pad_inches=0)
            lock.release()
        time.sleep(0.01)

def main():
    if not os.path.exists('framesBefore'):
        os.makedirs('framesBefore')  # create a folder for the unfiltered frames
    if not os.path.exists('framesAfter'):
        os.makedirs('framesAfter')  # create a folder for the filtered frames
    lock = Lock()
    vid = cv2.VideoCapture("maxwell cat.mp4")  # getting the video
    unfiltered_process = Process(target=saveFramesUnfiltered, args=(vid, lock))
    filtered_process = Process(target=saveFramesFiltered, args=lock)
    unfiltered_process.start()
    filtered_process.start()
    unfiltered_process.join()
    filtered_process.join()
    vid.release()
    cv2.destroyAllWindows()  # clear memory

if __name__ == '__main__':
    main()
I am also new to using threads in Python (and in general), so I would like to know if the way I implemented this is correct.
Thanks!
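For context: cv2.VideoCapture wraps native device/file handles, and multiprocessing pickles every argument it passes to a child process, which is why the capture object triggers the TypeError (a smaller issue is args=lock, which should be the tuple args=(lock,)). A minimal sketch of the usual workaround, passing the file name and opening the capture inside the worker:
import cv2
from multiprocessing import Process, Lock

def save_frames_unfiltered(video_path, lock):
    # opened inside the child process, so nothing needs to be pickled
    vid = cv2.VideoCapture(video_path)
    current_frame = 0
    while True:
        success, frame = vid.read()
        if not success:
            break  # end of video or read error
        with lock:
            cv2.imwrite("./framesBefore/frame%d.jpg" % current_frame, frame)
        current_frame += 1
    vid.release()

if __name__ == '__main__':
    lock = Lock()
    worker = Process(target=save_frames_unfiltered, args=("maxwell cat.mp4", lock))
    worker.start()
    worker.join()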

Detecting water in a plant stem in real time using computer vision (OpenCV and Python)

I am working on an experiment with plants in a pressure chamber. I need a computer vision algorithm to identify the exact moment when water starts to appear at the cut end of the stem. In the case of this video, taken from a USB microscope, that is approximately the interval between 0:30 and 0:34.
I tried using MOG, MOG2 and GMG as background subtractors, and comparing the histograms of each frame (using chi-squared, Bhattacharyya and correlation distances), looking for changes that could be significant, but so far without success. Is there a better alternative for this type of work?
Below is some code (written with the help of a friend):
import numpy as np
import sys
import time
import cv2
from matplotlib import pyplot as plt

video_filename = 'M20201022_004.mov'
capture = cv2.VideoCapture(video_filename)

#fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
fgbg = cv2.createBackgroundSubtractorMOG2()
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
#fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()

width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = capture.get(cv2.CAP_PROP_FPS)
num_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print(' height: {}\n width: {}\n fps: {}\n num_frames: {}\n'.format(height, width, frames_per_second, num_frames))

frameCounter = 0
t = time.process_time()
dist_hist = 0  # distance between histograms
frame_hist = 0
time_hist = 0

# write file
file1 = open("resultado.txt", "w")

if not capture.isOpened():
    print("Could not open video")
    print('frameCounter: {}'.format(frameCounter))
    sys.exit(1)

while capture.isOpened():
    success, frame = capture.read()
    frameCounter += 1
    # Test for read error
    if not success:
        print('Failed to read video - Video Capture EOF or Error')
        print('frameCounter:{}'.format(frameCounter))
        if frameCounter == num_frames + 1:
            print('EOF found')
        else:
            print('error')
        break
        #sys.exit(1)
    else:
        if frameCounter % 1000 == 0:
            print('type:{} size:{} dtype:{} len(shape):{} contador:{}'.format(type(frame), frame.size, frame.dtype, len(frame.shape), frameCounter))
            if len(frame.shape) < 3:  # grayscale
                h, w = frame.shape
                print('h:{} w:{}'.format(h, w))
            else:  # color image
                h, w, ch = frame.shape
                print('h:{} w:{} ch:{}'.format(h, w, ch))

        fgmask = fgbg.apply(frame)
        #fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

        # Initial histogram test
        if frameCounter == 1:
            hist_initial = cv2.calcHist([fgmask], [0], None, [16], [0, 256])
            # print('hist_initial:{}'.format(hist_initial))

        #elapsed_time = time.process_time() - t
        elapsed_time = frameCounter / frames_per_second

        # Process histogram
        hist_process = cv2.calcHist([fgmask], [0], None, [16], [0, 256])
        dist = cv2.compareHist(hist_initial, hist_process, cv2.HISTCMP_CHISQR)

        str1 = str(frameCounter) + "," + str(dist) + "," + str(dist_hist) + "," + str(elapsed_time)
        file1.write(str1)
        file1.write("\n")

        if dist > dist_hist:  # depending on compareHist method
            dist_hist = dist
            time_hist = elapsed_time
            frame_hist = frameCounter

        # Print line on image
        strfmt = 'frame: {} elapsed_time: {:7.2f}'.format(frameCounter, elapsed_time)
        cv2.putText(frame, strfmt, (0, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1, cv2.LINE_AA)

        cv2.imshow('frame', frame)
        cv2.imshow('fgmask', fgmask)
        if cv2.waitKey(1) & 0xff == 27:  # ESC pressed
            break

print('---> frame:{} dist:{:10.6f} time:{:7.2f}'.format(frame_hist, dist_hist, time_hist))

capture.release()
cv2.destroyAllWindows()
file1.close()
Any help appreciated!

Python: Automatically reconnect IP camera

My IP camera seems to be a little unstable and disconnects randomly. I'd like my script to be able to determine when it's disconnected and attempt to reconnect a few times, probably waiting 5-10 seconds between attempts. I've tried a few things, but nothing is working.
This is my basic script; when ret is False, the script ends:
#!/usr/local/bin/python3
import cv2
import time
import datetime

print("start time: " + datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"))
cap = cv2.VideoCapture('rtsp://<ip><port>/live0.264')

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Confirm we have a valid image returned
    if not ret:
        print("disconnected!")
        break
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

print("end time: " + time.strftime("%X"))
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
Edit: I would also like the script to try to reconnect to the camera in the event that my network goes down temporarily, or anything like that.
I was finally able to solve this myself. Hopefully this is useful for anyone else looking to do the same thing.
This is actually a shell of a more complex script that has logic for motion detection and video recording when motion is detected. Everything is working very well with this basic logic (and my crappy IP camera), although I am still testing.
#!/usr/local/bin/python3
import cv2
import datetime
import time

def reset_attempts():
    return 50

def process_video(attempts):
    while True:
        (grabbed, frame) = camera.read()
        if not grabbed:
            print("disconnected!")
            camera.release()
            if attempts > 0:
                time.sleep(5)
                return True
            else:
                return False

recall = True
attempts = reset_attempts()

while recall:
    camera = cv2.VideoCapture("rtsp://<ip><port>/live0.264")
    if camera.isOpened():
        print("[INFO] Camera connected at " +
              datetime.datetime.now().strftime("%m-%d-%Y %I:%M:%S%p"))
        attempts = reset_attempts()
        recall = process_video(attempts)
    else:
        print("Camera not opened " +
              datetime.datetime.now().strftime("%m-%d-%Y %I:%M:%S%p"))
        camera.release()
        attempts -= 1
        print("attempts: " + str(attempts))
        # give the camera some time to recover
        time.sleep(5)
        continue
A more detailed description can be found here:
https://github.com/Combinacijus/various-code-samples/tree/master/Python/OpenCV/ip_cam_reconnecting
I wrote a class to deal with an IP camera disconnecting randomly. The main idea is to check whether cap.read() returns a frame; if it doesn't, the class tries to reconnect to the camera.
import cv2
import requests  # NOTE: Only used for forceful reconnection
import time  # NOTE: Only used for throttling down printing when connection is lost

class IPVideoCapture:
    def __init__(self, cam_address, cam_force_address=None, blocking=False):
        """
        :param cam_address: ip address of the camera feed
        :param cam_force_address: ip address to disconnect other clients (forcefully take over)
        :param blocking: if True, read() and reconnect_camera() block until the ip camera is reconnected
        """
        self.cam_address = cam_address
        self.cam_force_address = cam_force_address
        self.blocking = blocking
        self.capture = None
        self.RECONNECTION_PERIOD = 0.5  # NOTE: Can be changed. Used to throttle down printing
        self.reconnect_camera()

    def reconnect_camera(self):
        while True:
            try:
                if self.cam_force_address is not None:
                    requests.get(self.cam_force_address)
                self.capture = cv2.VideoCapture(self.cam_address)
                if not self.capture.isOpened():
                    raise Exception("Could not connect to a camera: {0}".format(self.cam_address))
                print("Connected to a camera: {}".format(self.cam_address))
                break
            except Exception as e:
                print(e)
                if self.blocking is False:
                    break
                time.sleep(self.RECONNECTION_PERIOD)

    def read(self):
        """
        Reads a frame and, if the frame is not received, tries to reconnect the camera
        :return: ret - bool which specifies whether the frame was read successfully
                 frame - opencv image from the camera
        """
        ret, frame = self.capture.read()
        if ret is False:
            self.reconnect_camera()
        return ret, frame

if __name__ == "__main__":
    CAM_ADDRESS = "http://192.168.8.102:4747/video"  # NOTE: Change
    CAM_FORCE_ADDRESS = "http://192.168.8.102:4747/override"  # NOTE: Change or omit
    cap = IPVideoCapture(CAM_ADDRESS, CAM_FORCE_ADDRESS, blocking=True)
    # cap = IPVideoCapture(CAM_ADDRESS)  # Minimal init example
    while True:
        ret, frame = cap.read()
        if ret is True:
            cv2.imshow(CAM_ADDRESS, frame)
        if cv2.waitKey(1) == ord("q"):
            break

Python: upload a picture without disturbing the normal flow of a program

In my code I am constantly grabbing frames from a camera to check whether a human body is present. Whenever there is one, I crop the body, upload it to a server, and keep doing this.
PROBLEM: Whenever I start a thread to upload a photo to the server, my program execution stops and waits for the upload thread to finish. I don't want my program execution to stop and wait; I want it to run without stopping. I want to start a separate upload thread that runs in parallel, does its job without disturbing the normal flow, and finishes on its own. It should do this every time a body is detected.
# USAGE
# python detect.py --images images

# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import time
import threading
import Queue
import multiprocessing
import requests
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
from urllib2 import Request, urlopen, URLError
import urllib
import traceback

size = 2
i = 0

# Queues to store data
queue_FACES = multiprocessing.Queue()

(im_width, im_height) = (112, 112)

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Capture camera stream
#webcam = cv2.VideoCapture('/home/irum/Desktop/WIN_20170529_09_53_13_Pro.mp4')
webcam = cv2.VideoCapture(0)

#h=4.27 w=4.29 AVG = 4.28

# Upload to server
def upload_internet(filename2, sampleFile, check_path1):
    #print("uploading....")
    filename2 = filename2 + '.jpg'
    #print (filename2)
    register_openers()
    datagen, headers = multipart_encode({"sampleFile": open(sampleFile), "name": filename2})
    #request = urllib2.Request("http://videoupload.hopto.org:5000/api/Sync_log", datagen, headers)
    request = urllib2.Request("http://videoupload.hopto.org:5002/api/Synclog", datagen, headers)
    try:
        #print ("***UPLOAD SERVER RESPONSE***")
        response = urllib2.urlopen(request)
        html = response.read()
        print("html ", html)
        #resp = json.loads(html)
        # with open('output_file.txt', "wb") as code:  # CHANGE PATH
        #     code.write(curr_time + "\n" + html + "\n")
    except URLError, e:
        if hasattr(e, 'reason'):
            #print ('We failed to reach a server.')
            print('Reason: ', e.reason)
        elif hasattr(e, 'code'):
            #print ('The server couldn\'t fulfill the request.')
            print('Error code: ', e.code)
    except Exception:
        print('generic exception: ' + traceback.format_exc())

while True:
    # read each frame
    ret, frame = webcam.read()
    # resize it
    image = imutils.resize(frame, width=min(300, frame.shape[1]))
    orig = image.copy()
    # detect people in the frame
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                            padding=(8, 8), scale=1.05)
    # draw the original bounding boxes
    for i in range(len(rects)):
        body_i = rects[i]
        (x, y, w, h) = [v * 1 for v in body_i]
        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # apply non-maxima suppression
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    # draw the final bounding boxes
    for i in range(len(rects)):
        body_i = rects[i]
        (xA, yA, xB, yB) = [int(v * 1) for v in body_i]
        # rect on scaled image
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
        # rects to map on original frame
        (x1, y1, w1, h1) = [int(v * 4.28) for v in body_i]
        cv2.rectangle(frame, (x1, y1), (w1, h1), (0, 45, 255), 2)
        # Crop body from original frame
        body_big = frame[y1:y1 + h1, x1:x1 + w1]
        # Save body
        save_body_path = '/home/irum/Desktop/pedestrian-detection/BIG_BODY'
        cur_date = (time.strftime("%Y-%m-%d"))
        cur_time = (time.strftime("%H:%M:%S"))
        new_pin = cur_date + "-" + cur_time
        filename1 = 'BIG'
        filename2 = str(filename1) + "-" + str(new_pin)
        print("filename2", filename2)
        sampleFile = ('%s/%s.jpg' % (save_body_path, filename2))
        print("sampleFile", sampleFile)
        cv2.imwrite('%s/%s.jpg' % (save_body_path, filename2), body_big)
        # upload body
        upload_process = threading.Thread(target=upload_internet(filename2, sampleFile, save_body_path))
        upload_process.start()
    # show the output images
    cv2.imshow("Before NMS", orig)
    cv2.imshow("After NMS", image)
    cv2.imshow("BIG BODY", frame)
    # cv2.imshow("FACE", body_big2)
    key = cv2.waitKey(10)
    if key == 27:
        break
Correction:
Use cThread = threading.Thread(target=..., args=(...)) to define a new thread instance.
Use cThread.start() to launch it; of course you don't need join(), since your process is continuous.
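The difference in one place, using the upload_internet() function from the question:
# wrong: runs upload_internet(...) immediately in the main thread and
# passes its return value (None) to Thread as the target
upload_process = threading.Thread(target=upload_internet(filename2, sampleFile, save_body_path))

# right: pass the function object plus an args tuple; the call happens in the new thread
upload_process = threading.Thread(target=upload_internet, args=(filename2, sampleFile, save_body_path))
upload_process.start()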
Here is simplified code so I could test-run it at my end:
import time
import threading
import multiprocessing
from time import sleep

def upload_internet(filename, sampleFile, check_path):
    print("//// WAITING FOR SERVER RESPONSE")
    time.sleep(3)
    print("RECEIVED SERVER RESPONSE \\\\\\")

filename = "filename"
sampleFile = "sampleFile"
save_body_path = "save_body_path"
key = 1

while True:
    rects = range(0, 10)
    # draw the original bounding boxes
    range_len_rects = range(len(rects))
    for i in range_len_rects:
        print("Main starts")
        rects = range(0, 10)
        thread_list = []
        for i in range_len_rects:
            # upload body
            thread_list.append(threading.Thread(target=upload_internet,
                                                args=(filename + "-" + str(i), sampleFile, save_body_path)))
            thread_list[i].start()
            print("Exiting Launch Thread loop: " + str(i) + "/" + str(range_len_rects[i]))
        print("Main sleep for 10 seconds")
        time.sleep(10)
    if key == 27:
        break
PS: Remember that the threads are not destroyed automatically, and you must ensure upload_internet() cannot get stuck in memory for any reason; alternatively, you can control the number of instances, set a cap, and manage zombie threads, to avoid process crashes and bad memory management.
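One way to enforce such a cap, as a sketch (this assumes Python 3's concurrent.futures rather than the Python 2 urllib2 stack used above):
from concurrent.futures import ThreadPoolExecutor

# at most 4 uploads run concurrently; further jobs queue up instead of
# spawning an unbounded number of threads
executor = ThreadPoolExecutor(max_workers=4)

# inside the detection loop, replace the per-body Thread with:
executor.submit(upload_internet, filename2, sampleFile, save_body_path)

# on shutdown, wait for any outstanding uploads to finish
executor.shutdown(wait=True)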

AttributeError: 'dict' - Python, OpenCV and OpenALPR

Just having a play around with drawing on a webcam stream with OpenCV and OpenALPR. I have both of them working on their own, but when I put them together I get this error:
Invalid pattern provided: auwide
Valid patterns are located in the auwide.patterns file

Plate #1
         Plate    Confidence
    -     6U01     82.790466

Traceback (most recent call last):
  File "C:/Users/Alex/PycharmProjects/displayrec/testing.py", line 31, in <module>
    rec = plate.detectMultiScale(frame, 1.3, 5)
AttributeError: 'dict' object has no attribute 'detectMultiScale'

Process finished with exit code 1
I have had a read of this on here: Python OpenCV face detection code sometimes raises `'tuple' object has no attribute 'shape'`
I tried adding that to my code, but I still get the same error.
It's weird because it works for a second and throws a partial plate, but then crashes with the error above.
import numpy as np
import cv2
import sys
import os
from openalpr import Alpr

alpr = Alpr("auwide", "openalpr.conf", "runtime_data")
if not alpr.is_loaded():
    print("Error loading OpenALPR")
    sys.exit(1)

alpr.set_top_n(1)
alpr.set_default_region("auwide")

plate = cv2.CascadeClassifier('au.xml')
vc = cv2.VideoCapture(0)

while True:
    ret, frame = vc.read()
    img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    noise_removal = cv2.bilateralFilter(img_gray, 9, 75, 75)
    equal_histogram = cv2.equalizeHist(noise_removal)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    morph_image = cv2.morphologyEx(equal_histogram, cv2.MORPH_OPEN, kernel,
                                   iterations=15)
    sub_morp_image = cv2.subtract(equal_histogram, morph_image)
    rec = plate.detectMultiScale(frame, 1.3, 5)
    for (x, y, w, h) in rec:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi_gray = frame[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
    cv2.imshow("Result", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if ret:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        cv2.imwrite("img.jpg", sub_morp_image)
        results = alpr.recognize_file("img.jpg")
        i = 0
        for plate in results['results']:
            i += 1
            print("Plate #%d" % i)
            print("  %12s %12s" % ("Plate", "Confidence"))
            for candidate in plate['candidates']:
                prefix = "-"
                if candidate['matches_template']:
                    prefix = "*"
                print("  %s %12s%12f" % (prefix, candidate['plate'],
                                         candidate['confidence']))
    else:
        break

vc.release()
alpr.unload()
cv2.destroyAllWindows()
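For what it's worth, the traceback is consistent with a name clash in the code above: for plate in results['results']: rebinds plate from the CascadeClassifier to a plain result dict, so the next pass through the outer loop calls detectMultiScale on a dict. A minimal sketch of that part with a different loop variable:
for plate_result in results['results']:
    i += 1
    print("Plate #%d" % i)
    print("  %12s %12s" % ("Plate", "Confidence"))
    for candidate in plate_result['candidates']:
        prefix = "-"
        if candidate['matches_template']:
            prefix = "*"
        print("  %s %12s%12f" % (prefix, candidate['plate'], candidate['confidence']))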
