@@ -44,7 +44,7 @@
 //M*/

 // Enter your kernel in this window
-#pragma OPENCL EXTENSION cl_amd_printf:enable
+//#pragma OPENCL EXTENSION cl_amd_printf:enable
 #define CV_HAAR_FEATURE_MAX 3
 typedef int sumtype;
 typedef float sqsumtype;
@@ -144,6 +144,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
     candidate[outputoff + (lcl_id << 2) + 1] = (int4)0;
     candidate[outputoff + (lcl_id << 2) + 2] = (int4)0;
     candidate[outputoff + (lcl_id << 2) + 3] = (int4)0;
+
     for (int scalei = 0; scalei < loopcount; scalei++)
     {
         int4 scaleinfo1;
@@ -155,7 +156,9 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
         float factor = as_float(scaleinfo1.w);
         float correction_t = correction[scalei];
         int ystep = (int)(max(2.0f, factor) + 0.5f);
-        for(int grploop=get_group_id(0);grploop<totalgrp;grploop+=grpnumx){
+
+        for (int grploop = get_group_id(0); grploop < totalgrp; grploop += grpnumx)
+        {
             int4 cascadeinfo = p[scalei];
             int grpidy = grploop / grpnumperline;
             int grpidx = grploop - mul24(grpidy, grpnumperline);
@@ -181,11 +184,13 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
             variance_norm_factor = variance_norm_factor >= 0.f ? sqrt(variance_norm_factor) : 1.f;
             result = 1;
             nodecounter = startnode + nodecount * scalei;
-            for(int stageloop = start_stage; stageloop < split_stage&&result; stageloop++ )
+
+            for (int stageloop = start_stage; stageloop < end_stage && result; stageloop++)
             {
                 float stage_sum = 0.f;
                 int4 stageinfo = *(global int4 *)(stagecascadeptr + stageloop);
                 float stagethreshold = as_float(stageinfo.y);
+
                 for (int nodeloop = 0; nodeloop < stageinfo.x; nodeloop++)
                 {
                     __global GpuHidHaarTreeNode *currentnodeptr = (nodeptr + nodecounter);
@@ -210,80 +215,21 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
                     stage_sum += classsum >= nodethreshold ? alpha2.y : alpha2.x;
                     nodecounter++;
                 }
+
                 result = (stage_sum >= stagethreshold);
             }
+
             if (result && (ix < width) && (iy < height))
             {
                 int queueindex = atomic_inc(lclcount);
                 lcloutindex[queueindex << 1] = (y << 16) | x;
                 lcloutindex[(queueindex << 1) + 1] = as_int(variance_norm_factor);
             }
+
             barrier(CLK_LOCAL_MEM_FENCE);
             int queuecount = lclcount[0];
-            nodecounter = splitnode + nodecount * scalei;
-            for(int stageloop=split_stage;stageloop<end_stage&&queuecount>0;stageloop++)
-            {
-                lclcount[0]=0;
-                barrier(CLK_LOCAL_MEM_FENCE);
-                int2 stageinfo=*(global int2*)(stagecascadeptr+stageloop);
-                float stagethreshold=as_float(stageinfo.y);
-                int perfscale=queuecount>4?3:2;
-                int queuecount_loop=(queuecount+(1<<perfscale)-1)>>perfscale;
-                int lcl_compute_win=lcl_sz>>perfscale;
-                int lcl_compute_win_id=(lcl_id>>(6-perfscale));
-                int lcl_loops=(stageinfo.x+lcl_compute_win-1)>>(6-perfscale);
-                int lcl_compute_id=lcl_id-(lcl_compute_win_id<<(6-perfscale));
-                for(int queueloop=0;queueloop<queuecount_loop&&lcl_compute_win_id<queuecount;queueloop++)
-                {
-                    float stage_sum=0.f;
-                    int temp_coord=lcloutindex[lcl_compute_win_id<<1];
-                    float variance_norm_factor=as_float(lcloutindex[(lcl_compute_win_id<<1)+1]);
-                    int queue_offset=mad24(((temp_coord&(int)0xffff0000)>>16),step,temp_coord&0xffff);
-                    int tempnodecounter=lcl_compute_id;
-                    float part_sum=0.f;
-                    for(int lcl_loop=0;lcl_loop<lcl_loops&&tempnodecounter<stageinfo.x;lcl_loop++)
-                    {
-                        __global GpuHidHaarTreeNode* currentnodeptr = (nodeptr + nodecounter + tempnodecounter);
-                        int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
-                        int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
-                        int4 info3 = *(__global int4*)(&(currentnodeptr->p[2][0]));
-                        float4 w = *(__global float4*)(&(currentnodeptr->weight[0]));
-                        float2 alpha2 = *(__global float2*)(&(currentnodeptr->alpha[0]));
-                        float nodethreshold = w.w * variance_norm_factor;
-                        info1.x +=queue_offset;
-                        info1.z +=queue_offset;
-                        info2.x +=queue_offset;
-                        info2.z +=queue_offset;
-                        float classsum = (sum[mad24(info1.y,step,info1.x)] - sum[mad24(info1.y,step,info1.z)] -
-                                          sum[mad24(info1.w,step,info1.x)] + sum[mad24(info1.w,step,info1.z)]) * w.x;
-                        classsum += (sum[mad24(info2.y,step,info2.x)] - sum[mad24(info2.y,step,info2.z)] -
-                                     sum[mad24(info2.w,step,info2.x)] + sum[mad24(info2.w,step,info2.z)]) * w.y;
-
-                        info3.x +=queue_offset;
-                        info3.z +=queue_offset;
-                        classsum += (sum[mad24(info3.y,step,info3.x)] - sum[mad24(info3.y,step,info3.z)] -
-                                     sum[mad24(info3.w,step,info3.x)] + sum[mad24(info3.w,step,info3.z)]) * w.z;
-                        part_sum += classsum >= nodethreshold ? alpha2.y : alpha2.x;
-                        tempnodecounter+=lcl_compute_win;
-                    }
-                    partialsum[lcl_id]=part_sum;
-                    barrier(CLK_LOCAL_MEM_FENCE);
-                    for(int i=0;i<lcl_compute_win&&(lcl_compute_id==0);i++)
-                    {
-                        stage_sum+=partialsum[lcl_id+i];
-                    }
-                    if(stage_sum>=stagethreshold&&(lcl_compute_id==0))
-                    {
-                        int queueindex=atomic_inc(lclcount);
-                        lcloutindex[queueindex<<1]=temp_coord;
-                        lcloutindex[(queueindex<<1)+1]=as_int(variance_norm_factor);
-                    }
-                    lcl_compute_win_id+=(1<<perfscale);
-                    barrier(CLK_LOCAL_MEM_FENCE);
-                }
-                queuecount=lclcount[0];
-                nodecounter+=stageinfo.x;
-            }
+
+
             if (lcl_id < queuecount)
             {
                 int temp = lcloutindex[lcl_id << 1];
@@ -297,6 +243,7 @@ __kernel void gpuRunHaarClassifierCascade_scaled2(
                 atomic_inc(glboutindex);
                 candidate[outputoff + temp + lcl_id] = candidate_result;
             }
+
             barrier(CLK_LOCAL_MEM_FENCE);
         }
     }
@@ -308,15 +255,19 @@ __kernel void gpuscaleclassifier(global GpuHidHaarTreeNode * orinode, global Gpu
     int tr_x[3], tr_y[3], tr_h[3], tr_w[3], i = 0;
     GpuHidHaarTreeNode t1 = *(orinode + counter);
 #pragma unroll
-    for(i=0;i<3;i++){
+
+    for (i = 0; i < 3; i++)
+    {
         tr_x[i] = (int)(t1.p[i][0] * scale + 0.5f);
         tr_y[i] = (int)(t1.p[i][1] * scale + 0.5f);
         tr_w[i] = (int)(t1.p[i][2] * scale + 0.5f);
         tr_h[i] = (int)(t1.p[i][3] * scale + 0.5f);
     }
+
     t1.weight[0] = t1.p[2][0] ? -(t1.weight[1] * tr_h[1] * tr_w[1] + t1.weight[2] * tr_h[2] * tr_w[2]) / (tr_h[0] * tr_w[0]) : -t1.weight[1] * tr_h[1] * tr_w[1] / (tr_h[0] * tr_w[0]);
     counter += nodenum;
 #pragma unroll
+
     for (i = 0; i < 3; i++)
     {
         newnode[counter].p[i][0] = tr_x[i];
@@ -325,6 +276,7 @@ __kernel void gpuscaleclassifier(global GpuHidHaarTreeNode * orinode, global Gpu
         newnode[counter].p[i][3] = tr_y[i] + tr_h[i];
         newnode[counter].weight[i] = t1.weight[i] * weight_scale;
     }
+
     newnode[counter].left = t1.left;
     newnode[counter].right = t1.right;
     newnode[counter].threshold = t1.threshold;