How to pan and zoom video in OpenCV? - python

I am trying to implement pan and zoom functions in my RTSP camera stream using Python. I want to be able to use my mouse wheel to zoom in and out from the video and click and drag to move the video around. However, I have not been able to find any tutorials on handling these mouse events in OpenCV.
Is there a way to do this, or will I have to resort to using keystrokes to pan and zoom my video?

You can use mouse events and imshow to achieve this:
import cv2

def mouse_event_callback(self, event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        ...  # start dragging
    elif event == cv2.EVENT_MOUSEMOVE:
        ...  # update the pan offset while dragging
    elif event == cv2.EVENT_LBUTTONUP:
        ...  # stop dragging
    elif event == cv2.EVENT_MOUSEWHEEL:
        ...  # adjust the zoom scale

cv2.namedWindow('Test')
cv2.setMouseCallback('Test', self.mouse_event_callback)
while True:
    cv2.imshow('Test', img)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break

The basic idea is to decide how much the scale changes on each mouse-wheel event. Once you have the current scale (relative to the original image) and the region of the image you want to show on screen, you can compute the position and size of the corresponding rectangle on the scaled image and draw that rectangle.
In my GitHub repo, checking OnMouseWheel() and RefreshSrcView() in Fastest_Image_Pattern_Matching/ELCVMatchTool/ELCVMatchToolDlg.cpp may give you what you want.
Although it's C++ code, only imshow() and resize() are used. Focusing on how I change the scale and how the new rectangle is drawn on the scaled image should be enough.
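For the Python/RTSP case in the question, here is a minimal sketch of the same idea: mouse-wheel zoom centred on the cursor plus click-and-drag panning, done purely with imshow() and resize(). The class name, window name, RTSP URL and scale limits below are illustrative assumptions, not part of the original project:
import cv2

class PanZoomViewer:
    def __init__(self, win='Test', min_scale=1.0, max_scale=8.0):
        self.win = win
        self.scale = 1.0
        self.min_scale, self.max_scale = min_scale, max_scale
        self.offset = [0, 0]          # top-left of the visible region, in original-image pixels
        self.dragging = False
        self.last = (0, 0)
        cv2.namedWindow(win)
        cv2.setMouseCallback(win, self.on_mouse)

    def on_mouse(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.dragging, self.last = True, (x, y)
        elif event == cv2.EVENT_MOUSEMOVE and self.dragging:
            self.offset[0] -= (x - self.last[0]) / self.scale
            self.offset[1] -= (y - self.last[1]) / self.scale
            self.last = (x, y)
        elif event == cv2.EVENT_LBUTTONUP:
            self.dragging = False
        elif event == cv2.EVENT_MOUSEWHEEL:
            old = self.scale
            step = 1.25 if flags > 0 else 0.8   # the wheel direction is encoded in the sign of flags
            self.scale = min(self.max_scale, max(self.min_scale, old * step))
            # keep the pixel under the cursor fixed while zooming
            self.offset[0] += x / old - x / self.scale
            self.offset[1] += y / old - y / self.scale

    def show(self, frame):
        h, w = frame.shape[:2]
        vw, vh = int(w / self.scale), int(h / self.scale)
        x0 = int(min(max(self.offset[0], 0), w - vw))
        y0 = int(min(max(self.offset[1], 0), h - vh))
        self.offset = [x0, y0]
        roi = frame[y0:y0 + vh, x0:x0 + vw]
        cv2.imshow(self.win, cv2.resize(roi, (w, h)))

viewer = PanZoomViewer()
cap = cv2.VideoCapture('rtsp://user:pass@camera/stream')   # hypothetical RTSP URL
while True:
    ok, frame = cap.read()
    if not ok:
        break
    viewer.show(frame)
    if cv2.waitKey(1) == 27:   # Esc to quit
        break
The callback only updates the scale and the pan offset; show() clamps the offset, crops the visible region from the original frame and resizes it back to the window size, which is exactly the rectangle-on-scaled-image idea described above.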
Part of the code:
BOOL CELCVMatchToolDlg::OnMouseWheel (UINT nFlags, short zDelta, CPoint pt)
{
    POINT pointCursor;
    GetCursorPos (&pointCursor);
    ScreenToClient (&pointCursor);
    // TODO: Add your message handler code here and/or call the default
    if (zDelta > 0)
    {
        if (m_iScaleTimes == MAX_SCALE_TIMES)
            return TRUE;
        else
            m_iScaleTimes++;
    }
    if (zDelta < 0)
    {
        if (m_iScaleTimes == MIN_SCALE_TIMES)
            return TRUE;
        else
            m_iScaleTimes--;
    }
    CRect rect;
    //GetWindowRect (rect);
    GetDlgItem (IDC_STATIC_SRC_VIEW)->GetWindowRect (rect); // important
    if (m_iScaleTimes == 0)
        g_dCompensationX = g_dCompensationY = 0;
    int iMouseOffsetX = pt.x - (rect.left + 1);
    int iMouseOffsetY = pt.y - (rect.top + 1);
    double dPixelX = (m_hScrollBar.GetScrollPos () + iMouseOffsetX + g_dCompensationX) / m_dNewScale;
    double dPixelY = (m_vScrollBar.GetScrollPos () + iMouseOffsetY + g_dCompensationY) / m_dNewScale;
    m_dNewScale = m_dSrcScale * pow (SCALE_RATIO, m_iScaleTimes);
    if (m_iScaleTimes != 0)
    {
        int iWidth = m_matSrc.cols;
        int iHeight = m_matSrc.rows;
        m_hScrollBar.SetScrollRange (0, int (m_dNewScale * iWidth - m_dSrcScale * iWidth) - 1 + BAR_SIZE);
        m_vScrollBar.SetScrollRange (0, int (m_dNewScale * iHeight - m_dSrcScale * iHeight) - 1 + BAR_SIZE);
        int iBarPosX = int (dPixelX * m_dNewScale - iMouseOffsetX + 0.5);
        m_hScrollBar.SetScrollPos (iBarPosX);
        m_hScrollBar.ShowWindow (SW_SHOW);
        g_dCompensationX = -iBarPosX + (dPixelX * m_dNewScale - iMouseOffsetX);
        int iBarPosY = int (dPixelY * m_dNewScale - iMouseOffsetY + 0.5);
        m_vScrollBar.SetScrollPos (iBarPosY);
        m_vScrollBar.ShowWindow (SW_SHOW);
        g_dCompensationY = -iBarPosY + (dPixelY * m_dNewScale - iMouseOffsetY);
        // scroll thumb size
        SCROLLINFO infoH;
        infoH.cbSize = sizeof (SCROLLINFO);
        infoH.fMask = SIF_PAGE;
        infoH.nPage = BAR_SIZE;
        m_hScrollBar.SetScrollInfo (&infoH);
        SCROLLINFO infoV;
        infoV.cbSize = sizeof (SCROLLINFO);
        infoV.fMask = SIF_PAGE;
        infoV.nPage = BAR_SIZE;
        m_vScrollBar.SetScrollInfo (&infoV);
        // scroll thumb size
    }
    else
    {
        m_hScrollBar.SetScrollPos (0);
        m_hScrollBar.ShowWindow (SW_HIDE);
        m_vScrollBar.SetScrollPos (0);
        m_vScrollBar.ShowWindow (SW_HIDE);
    }
    RefreshSrcView ();
    return CDialogEx::OnMouseWheel (nFlags, zDelta, pt);
}

Related

Any method like np.concatenate to use in c++?

I have recently been studying deep learning with DICOM images, and I want to move my DICOM image-processing code from Python to C++.
Following is my Python code:
import numpy as np

def window_img(dcm, width=None, level=None):
    pixels = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept
    lower = level - (width / 2)
    upper = level + (width / 2)
    pixels[pixels < lower] = lower
    pixels[pixels > upper] = upper
    pixels = (pixels - (level - width // 2)) / width
    return pixels
def meta_dicom_convert(dcm, ww, wc):
b = window_img(dcm, ww[0], wc[0])
g = window_img(dcm, ww[1], wc[1])
r = window_img(dcm, ww[2], wc[2])
image = np.concatenate([b, g, r], axis=2)
return image
I tried to do the same thing in C++, but the only method I found was cv::merge. I used cv::merge, but the result seems a little different from the image combined by np.concatenate.
My C++ code:
DcmFileFormat dfile;
OFCondition result = dfile.loadFile("D:\\python\\RSNA\\stage_2_train\\ID_c8355f255.dcm");
DcmDataset *dcmdataset = dfile.getDataset();
double slope, intercept;
dcmdataset->findAndGetFloat64(DCM_RescaleSlope, slope);
dcmdataset->findAndGetFloat64(DCM_RescaleIntercept, intercept);
DicomImage dcmimage_brain("D:\\python\\RSNA\\stage_2_train\\ID_c8355f255.dcm");
DicomImage dcmimage_subdural("D:\\python\\RSNA\\stage_2_train\\ID_c8355f255.dcm");
DicomImage dcmimage_bone("D:\\python\\RSNA\\stage_2_train\\ID_c8355f255.dcm");
int nWidth = dcmimage_brain.getWidth();
int nHeight = dcmimage_brain.getHeight();
cout << "size = " << nWidth << " x " << nHeight << endl;
int wcenter = 40, wwidth = 80;
int lower = wcenter - (wwidth / 2);
int upper = wcenter + (wwidth / 2);
dcmimage_brain.setWindow(wcenter, wwidth);
Uint8 *pixeldata_brain = (Uint8*)(dcmimage_brain.getOutputData(8));
if (pixeldata_brain)
{
    for (int i = 0; i < nWidth * nHeight; ++i) {
        if (pixeldata_brain[i] < lower)
            pixeldata_brain[i] = lower;
        if (pixeldata_brain[i] > upper)
            pixeldata_brain[i] = upper;
        pixeldata_brain[i] = (255 * (pixeldata_brain[i] - lower)) / (upper - lower);
    }
}
Mat brain = Mat(nWidth, nHeight, CV_8UC1, pixeldata_brain);
//brain.convertTo(brain, CV_8UC1);
wcenter = 80;
wwidth = 200;
lower = wcenter - (wwidth / 2);
upper = wcenter + (wwidth / 2);
dcmimage_subdural.setWindow(wcenter, wwidth);
Uint8 *pixeldata_subdural = (Uint8*)(dcmimage_subdural.getOutputData(8));
if (pixeldata_subdural)
{
    for (int i = 0; i < nWidth * nHeight; ++i) {
        if (pixeldata_subdural[i] < lower)
            pixeldata_subdural[i] = lower;
        if (pixeldata_subdural[i] > upper)
            pixeldata_subdural[i] = upper;
        pixeldata_subdural[i] = (255 * (pixeldata_subdural[i] - lower)) / (upper - lower);
    }
}
Mat subdural = Mat(nWidth, nHeight, CV_8UC1, pixeldata_subdural);
//subdural.convertTo(subdural, CV_8UC1);
wcenter = 40;
wwidth = 380;
lower = wcenter - (wwidth / 2);
upper = wcenter + (wwidth / 2);
dcmimage_bone.setWindow(wcenter, wwidth);
Uint8 *pixeldata_bone = (Uint8*)(dcmimage_bone.getOutputData(8));
if (pixeldata_bone) {
    for (int i = 0; i < nWidth * nHeight; ++i) {
        if (pixeldata_bone[i] < lower)
            pixeldata_bone[i] = lower;
        if (pixeldata_bone[i] > upper)
            pixeldata_bone[i] = upper;
        pixeldata_bone[i] = (255 * (pixeldata_bone[i] - lower) / (upper - lower));
    }
}
Mat bone = Mat(nWidth, nHeight, CV_8UC1, pixeldata_bone);
imshow("bone", bone);
//bone.convertTo(bone, CV_8UC1);
//Mat bone = Mat(nWidth, nHeight, CV_16UC1, pbone);
//bone.convertTo(bone, CV_8UC1);
// new Image size
Mat image = Mat::zeros(nWidth, nHeight, CV_8UC3);
vector<Mat> channels;
split(image, channels);
channels.at(0) = brain;
channels.at(1) = subdural;
channels.at(2) = bone;
merge(channels, image);
imshow("mergeImage", image);
waitKey();
Result image (left side: cv::merge, right side: np.concatenate):
Does cv::merge actually do something other than concatenate images along the channel axis?
Is there any other method I can use in C++ to do the same thing as np.concatenate?
Any reply would be appreciated. Thanks.
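As a quick sanity check in Python (with hypothetical arrays, not the DICOM data above): given the same three single-channel planes, np.concatenate along a channel axis and cv2.merge produce the same array, so any visual difference is more likely to come from how each plane is windowed/normalized than from cv::merge itself.
import numpy as np
import cv2

# three hypothetical single-channel 8-bit planes
b = np.full((4, 4), 10, dtype=np.uint8)
g = np.full((4, 4), 20, dtype=np.uint8)
r = np.full((4, 4), 30, dtype=np.uint8)

# np.concatenate needs an explicit channel axis; cv2.merge does not
by_concat = np.concatenate([b[..., None], g[..., None], r[..., None]], axis=2)
by_merge = cv2.merge([b, g, r])

print(by_concat.shape, by_merge.shape)      # (4, 4, 3) (4, 4, 3)
print(np.array_equal(by_concat, by_merge))  # True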

Problem with two dynamically updated vertical lines on two charts in Chart.js

I have a problem with simultaneously updating the position of vertical lines on two plots using Chart.js. What I want to do is draw a vertical line at a specific x position on one graph when the mouse pointer is over the other graph. The problem with the current code is that after moving the mouse pointer over one plot, a line is drawn on the second plot, but that plot doesn't refresh, so after moving the pointer again there are a bunch of leftover lines.
I tried calling update() before drawing the vertical lines, which actually solves the problem, but then the whole chart is redrawn and it's very slow.
Thanks for the help!
Chart.defaults.LineWithLine = Chart.defaults.scatter
Chart.controllers.LineWithLine = Chart.controllers.scatter.extend({
    draw: function(ease) {
        Chart.controllers.scatter.prototype.draw.call(this, ease);
        if (this.chart.tooltip._active && this.chart.tooltip._active.length) {
            var activePoint = this.chart.tooltip._active[0],
                ctx = this.chart.ctx,
                x = activePoint.tooltipPosition().x,
                topY = this.chart.scales['y-axis-1'].top,
                bottomY = this.chart.scales['y-axis-1'].bottom;
            // draw line
            ctx.save();
            ctx.beginPath();
            ctx.moveTo(x, topY);
            ctx.lineTo(x, bottomY);
            ctx.lineWidth = 1.5;
            ctx.strokeStyle = 'black';
            ctx.stroke();
            ctx.restore();
            // get x value
            var xValue = map(x, this.chart.chartArea.left, this.chart.chartArea.right, chainage_min, chainage_max);
            if (this.chart == graph2) {
                try {
                    // graph1.update() // drastically slows down
                } finally {
                    //
                }
                var activePoint = graph2.tooltip._active[0],
                    ctx2 = graph1.ctx,
                    x = graph1.scales['x-axis-1'].getPixelForValue(xValue),
                    topY = graph1.scales['y-axis-1'].top,
                    bottomY = graph1.scales['y-axis-1'].bottom;
                // draw line
                ctx2.save();
                ctx2.beginPath();
                ctx2.moveTo(x, topY);
                ctx2.lineTo(x, bottomY);
                ctx2.lineWidth = 2.0;
                ctx2.strokeStyle = 'black';
                ctx2.stroke();
                ctx2.restore();
            } else if (this.chart == graph1) {
                try {
                    //graph2.update() // drastically slows down
                } finally {
                    //
                }
                var activePoint = graph1.tooltip._active[0],
                    ctx2 = graph2.ctx,
                    x = graph2.scales['x-axis-1'].getPixelForValue(xValue),
                    topY = graph2.scales['y-axis-1'].top,
                    bottomY = graph2.scales['y-axis-1'].bottom;
                // draw line
                ctx2.save();
                ctx2.beginPath();
                ctx2.moveTo(x, topY);
                ctx2.lineTo(x, bottomY);
                ctx2.lineWidth = 2.0;
                ctx2.strokeStyle = 'black';
                ctx2.stroke();
                ctx2.restore();
            }
        }
    }
})

function map(value, start1, stop1, start2, stop2) {
    return start2 + (stop2 - start2) * ((value - start1) / (stop1 - start1))
}

Draw Longest Straight Line in Contours OpenCv [duplicate]

I am using OpenCV and Python. I am trying to draw the longest line inside a contour.
I have a contour named cnt. The image is binary: the inside of the contour is white and the outside is black. I would like to draw the longest line inside the white contour. I found how to draw lines using cv2.line, but I didn't find how to draw the longest one. Do you have any ideas?
img_copy = cv2.dilate(copy.deepcopy(img), np.ones((2,2),np.uint8),iterations = 2)
contours, hierarchy = cv2.findContours(copy.deepcopy(img_copy),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
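One rough Python sketch of a possible approach: approximate the longest line as the farthest pair of contour points and, for concave shapes, keep only segments whose sampled points stay inside the contour via cv2.pointPolygonTest. The helper name, the brute-force pair loop and the sampling count are illustrative assumptions, not an optimized solution:
import cv2
import numpy as np

def longest_inner_segment(cnt, samples=20):
    # Brute force over pairs of contour points; keep the longest segment
    # whose sampled points all stay inside the (possibly concave) contour.
    pts = cnt.reshape(-1, 2)
    best_len, best_pair = 0.0, None
    for i in range(len(pts)):
        for j in range(i + 1, len(pts)):
            p, q = pts[i], pts[j]
            length = float(np.hypot(q[0] - p[0], q[1] - p[1]))
            if length <= best_len:
                continue
            ts = np.linspace(0.0, 1.0, samples)
            inside = all(
                cv2.pointPolygonTest(
                    cnt,
                    (float(p[0] + t * (q[0] - p[0])), float(p[1] + t * (q[1] - p[1]))),
                    False) >= 0
                for t in ts)
            if inside:
                best_len = length
                best_pair = ((int(p[0]), int(p[1])), (int(q[0]), int(q[1])))
    return best_pair

pair = longest_inner_segment(cnt)
if pair is not None:
    vis = cv2.cvtColor(img_copy, cv2.COLOR_GRAY2BGR)  # assumes img_copy is single-channel
    cv2.line(vis, pair[0], pair[1], (0, 0, 255), 2)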
The method below is used to draw the lines found in the image and get the angle (in degrees) with the highest count. Try this; it's working fine for me.
Mat Compute_skewAngle (Mat& src, Mat& src_gray, int drawLine) {
    int thresh = 100;
    RNG rng(12345);
    // 1. Load Gray Scale Image
    // 2. Get Size of Image
    cv::Size size = src_gray.size();
    // 3. Blur the Grayscale image
    cv::blur(src_gray, src_gray, cv::Size(3,3) );
    cv::Mat threshold_output;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<Vec4i> hierarchy;
    // 4. Detect edges using Threshold / Canny edge Detector
    //cv::threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
    Mat dst, cdst;
    cv::Canny(src_gray, dst, thresh, 200, 3);
    // 5. Gray Image to BGR
    cvtColor(dst, cdst, CV_GRAY2BGR);
#if 0
    vector<Vec2f> lines;
    HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );
    for( size_t i = 0; i < lines.size(); i++ )
    {
        float rho = lines[i][0], theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
    }
#else
    vector<Vec4i> lines;
    double angle = 0.;
    int countNegative = 0;
    int countPositive = 0;
    HoughLinesP(dst, lines, 1, CV_PI/180, 100, 10, 100);
    NSMutableDictionary *angleCountDict = [[NSMutableDictionary alloc] init];
    for( size_t i = 0; i < lines.size(); i++ )
    {
        if(drawLine == 1) { // draw the line when the flag value 1 is passed
            Vec4i l = lines[i];
            line( cdst, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
        }
        double delta_y = lines[i][3] - lines[i][1];
        double delta_x = lines[i][2] - lines[i][0];
        double currentAngle = atan2(delta_y, delta_x);
        int angleAsDeg = abs(currentAngle * 180 / CV_PI);
        NSString *_retValue = [angleCountDict objectForKey:[NSString stringWithFormat:@"%d", angleAsDeg]];
        int angleCount = [_retValue intValue];
        [angleCountDict setObject:[NSNumber numberWithInt:angleCount + 1] forKey:[NSString stringWithFormat:@"%d", angleAsDeg]];
        double slope = delta_y / delta_x; // find the slope to detect whether the angle is "-" or "+"
        if(slope < 0)
            countNegative++;
        else
            countPositive++;
    }
#endif
    // sort the dictionary to get the degree with the largest count
    NSArray *blockSortedKeys = [angleCountDict keysSortedByValueUsingComparator: ^(id obj1, id obj2) {
        return [obj2 compare:obj1];
    }];
    NSString *degreeVal;
    if([blockSortedKeys count] > 0)
        degreeVal = [blockSortedKeys objectAtIndex:0];
    angle = [degreeVal doubleValue];
    if(countNegative > countPositive) {
        angle = -angle;
    }
    Mat outPut;
    outPut = rotateMatImage(src, angle, cdst);
    return outPut;
}


Conversion of PyQt implementation of drawEllipse into Matlab

I have a Python program which draws some ellipses into a window. The following Python code is used:
from PyQt4.QtGui import *
def draw_ellipse(self, center, rad_x, rad_y, angle, color):
qp = QtGui.QPainter()
qp.begin(self)
qp.translate(center)
qp.rotate(math.degrees(angle))
qp.setPen(QtGui.QColor(color))
qp.drawEllipse(QPoint(0, 0), rad_x, rad_y)
qp.end()
As you can see my input parameters are center, rad_x, rad_y and angle. These parameters are read in from a text file.
I want to use the very same parameter file in a Matlab program. For that I need to know the implementation of drawEllipse, so that I can implement the very same functionality in Matlab.
Unfortunately I can't seem to find the source code for drawEllipse. I found this link with the following code:
void QPainter::drawEllipse(const QRectF &r)
{
#ifdef QT_DEBUG_DRAW
    if (qt_show_painter_debug_output)
        printf("QPainter::drawEllipse(), [%.2f,%.2f,%.2f,%.2f]\n", r.x(), r.y(), r.width(), r.height());
#endif

    if (!isActive())
        return;
    Q_D(QPainter);
    d->updateState(d->state);

    QRectF rect(r.normalized());

    if (rect.isEmpty())
        return;

    if (d->state->emulationSpecifier) {
        if (d->state->emulationSpecifier == QPaintEngine::PrimitiveTransform
            && d->state->txop == QPainterPrivate::TxTranslate) {
            rect.translate(QPointF(d->state->matrix.dx(), d->state->matrix.dy()));
        } else {
            QPainterPath path;
            path.addEllipse(rect);
            d->draw_helper(path, QPainterPrivate::StrokeAndFillDraw);
            return;
        }
    }

    d->engine->drawEllipse(rect);
}
Which leads me to this code (QPainterPath.addEllipse):
void QPainterPath::addEllipse(const QRectF &boundingRect)
{
#ifndef QT_NO_DEBUG
    if (qIsNan(boundingRect.x()) || qIsNan(boundingRect.y())
        || qIsNan(boundingRect.width()) || qIsNan(boundingRect.height()))
        qWarning("QPainterPath::addEllipse: Adding ellipse where a parameter is NaN, results are undefined");
#endif
    if (boundingRect.isNull())
        return;

    ensureData();
    detach();

    Q_D(QPainterPath);
    d->elements.reserve(d->elements.size() + 13);

    QPointF pts[12];
    int point_count;
    QPointF start = qt_curves_for_arc(boundingRect, 0, 360, pts, &point_count);

    moveTo(start);
    cubicTo(pts[0], pts[1], pts[2]);    // 0 -> 270
    cubicTo(pts[3], pts[4], pts[5]);    // 270 -> 180
    cubicTo(pts[6], pts[7], pts[8]);    // 180 -> 90
    cubicTo(pts[9], pts[10], pts[11]);  // 90 -> 0
    d_func()->require_moveTo = true;
}
So let's go into qstroker.cpp and look at qt_curves_for_arc:
QPointF qt_curves_for_arc(const QRectF &rect, qreal startAngle, qreal sweepLength,
                          QPointF *curves, int *point_count)
{
    Q_ASSERT(point_count);
    Q_ASSERT(curves);

#ifndef QT_NO_DEBUG
    if (qIsNan(rect.x()) || qIsNan(rect.y()) || qIsNan(rect.width()) || qIsNan(rect.height())
        || qIsNan(startAngle) || qIsNan(sweepLength))
        qWarning("QPainterPath::arcTo: Adding arc where a parameter is NaN, results are undefined");
#endif
    *point_count = 0;

    if (rect.isNull()) {
        return QPointF();
    }

    if (sweepLength > 360) sweepLength = 360;
    else if (sweepLength < -360) sweepLength = -360;

    // Special case fast path
    if (startAngle == 0.0 && sweepLength == 360.0) {
        qreal x = rect.x();
        qreal y = rect.y();

        qreal w = rect.width();
        qreal w2 = rect.width() / 2;
        qreal w2k = w2 * QT_PATH_KAPPA;

        qreal h = rect.height();
        qreal h2 = rect.height() / 2;
        qreal h2k = h2 * QT_PATH_KAPPA;

        // 0 -> 270 degrees
        curves[(*point_count)++] = QPointF(x + w, y + h2 + h2k);
        curves[(*point_count)++] = QPointF(x + w2 + w2k, y + h);
        curves[(*point_count)++] = QPointF(x + w2, y + h);

        // 270 -> 180 degrees
        curves[(*point_count)++] = QPointF(x + w2 - w2k, y + h);
        curves[(*point_count)++] = QPointF(x, y + h2 + h2k);
        curves[(*point_count)++] = QPointF(x, y + h2);

        // 180 -> 90 degrees
        curves[(*point_count)++] = QPointF(x, y + h2 - h2k);
        curves[(*point_count)++] = QPointF(x + w2 - w2k, y);
        curves[(*point_count)++] = QPointF(x + w2, y);

        // 90 -> 0 degrees
        curves[(*point_count)++] = QPointF(x + w2 + w2k, y);
        curves[(*point_count)++] = QPointF(x + w, y + h2 - h2k);
        curves[(*point_count)++] = QPointF(x + w, y + h2);

        return QPointF(x + w, y + h2);
    }

#define ANGLE(t) ((t) * 2 * Q_PI / 360.0)
#define SIGN(t) (t > 0 ? 1 : -1)
    qreal a = rect.width() / 2.0;
    qreal b = rect.height() / 2.0;

    qreal absSweepLength = (sweepLength < 0 ? -sweepLength : sweepLength);
    int iterations = (int)ceil((absSweepLength) / 90.0);

    QPointF first_point;

    if (iterations == 0) {
        first_point = rect.center() + QPointF(a * qCos(ANGLE(startAngle)),
                                              -b * qSin(ANGLE(startAngle)));
    } else {
        qreal clength = sweepLength / iterations;
        qreal cosangle1, sinangle1, cosangle2, sinangle2;

        for (int i=0; i<iterations; ++i) {
            qreal cangle = startAngle + i * clength;

            cosangle1 = qCos(ANGLE(cangle));
            sinangle1 = qSin(ANGLE(cangle));
            cosangle2 = qCos(ANGLE(cangle + clength));
            sinangle2 = qSin(ANGLE(cangle + clength));

            // Find the start and end point of the curve.
            QPointF startPoint = rect.center() + QPointF(a * cosangle1, -b * sinangle1);
            QPointF endPoint = rect.center() + QPointF(a * cosangle2, -b * sinangle2);

            // The derived at the start and end point.
            qreal sdx = -a * sinangle1;
            qreal sdy = -b * cosangle1;
            qreal edx = -a * sinangle2;
            qreal edy = -b * cosangle2;

            // Creating the tangent lines. We need to reverse their direction if the
            // sweep is negative (clockwise)
            QLineF controlLine1(startPoint, startPoint + SIGN(sweepLength) * QPointF(sdx, sdy));
            QLineF controlLine2(endPoint, endPoint - SIGN(sweepLength) * QPointF(edx, edy));

            // We need to scale down the control lines to match that of the current sweeplength.
            // qAbs because we only want to scale, not change direction.
            qreal kappa = QT_PATH_KAPPA * qAbs(clength) / 90.0;
            // Adjust their length to fit the magic KAPPA length.
            controlLine1.setLength(controlLine1.length() * kappa);
            controlLine2.setLength(controlLine2.length() * kappa);

            curves[(*point_count)++] = controlLine1.p2();
            curves[(*point_count)++] = controlLine2.p2();
            curves[(*point_count)++] = endPoint;

            if (i == 0)
                first_point = startPoint;
        }
    }

    return first_point;
}
This is quite a lot of code for drawing a simple ellipse! It doesn't feel right to rewrite all of this in Matlab, if a simple ellipse can be plotted like this:
a = rad_x; % horizontal radius
b = rad_y; % vertical radius
x0 = center_x; % x0, y0 ellipse centre coordinates
y0 = center_y;
steps = 50;
t = linspace(0, 2*pi, steps);
theta0 = angle;
x = x0 + (a * sin(t - theta0));
y = y0 + (b * cos(t));
plot(x, y, '.-'),
Question:
Given the four parameters listed above (center, rad_x, rad_y and angle), what's the easiest way for me to plot the ellipse correctly in Matlab? With my Matlab code above, plotting currently only works for small angles and particular rad_x & rad_y combinations.
How about something like this?
a = rad_x;
b = rad_y;
r0 = center_x + i*center_y; % <-- origin of ellipse
theta0=angle; % <-- angle of rotation of ellipse
steps = 200;
t = linspace(0, 2*pi, steps);
r = a*sin(t) + i*b*cos(t);
R = exp(i*theta0);
r = R*r;
r = r0 + r;
figure(1), hold on
plot(real(r), imag(r), '-r')
Using matrices rather than complex numbers:
a = rad_x;
b = rad_y;
r0 = [center_x ; center_y]; % <-- origin of ellipse
theta0=angle; % <-- angle of rotation of ellipse
steps = 200;
t = linspace(0, 2*pi, steps);
r = [a*sin(t) ; b*cos(t)];
R = [cos(theta0) -sin(theta0) ; sin(theta0) cos(theta0)]; % <-- note neg signs: define direction of rotation!
r = R*r;
r = r0*ones(1,length(t)) + r;
figure(1)
plot(r(1,:), r(2,:), '-r')
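For reference, the same construction in Python/NumPy (the language of the original drawing code), purely as a cross-check of the rotation math; the parameter values and the use of matplotlib are assumptions for illustration:
import numpy as np
import matplotlib.pyplot as plt

# hypothetical parameters as they would be read from the text file
center_x, center_y, rad_x, rad_y, angle = 10.0, 5.0, 4.0, 2.0, np.pi / 6

t = np.linspace(0, 2 * np.pi, 200)
pts = np.vstack([rad_x * np.sin(t), rad_y * np.cos(t)])   # unrotated ellipse, same sin/cos convention as above
R = np.array([[np.cos(angle), -np.sin(angle)],
              [np.sin(angle),  np.cos(angle)]])           # rotation matrix
x, y = R @ pts + np.array([[center_x], [center_y]])

plt.plot(x, y, '-r')
plt.axis('equal')
plt.show()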
