Я новичок в использовании ReactiveX, и мне предложили следующую проблему:
import android.content.pm.ActivityInfo
import android.os.Bundle
import android.os.Environment
import android.support.design.widget.FloatingActionButton
import android.support.v4.app.Fragment
import org.opencv.android.BaseLoaderCallback
import org.opencv.android.CameraBridgeViewBase
import org.opencv.android.LoaderCallbackInterface
import org.opencv.android.OpenCVLoader
import org.opencv.core.*
import org.opencv.imgproc.Imgproc
import org.opencv.objdetect.CascadeClassifier
import org.opencv.objdetect.Objdetect
import pe.gob.agrorural.sisap.R
import pe.gob.agrorural.sisap.util.FileUtils
import timber.log.Timber
import org.opencv.face.LBPHFaceRecognizer
import org.opencv.face.FaceRecognizer
import java.io.File
import android.support.v7.app.AlertDialog
import android.view.*
import android.widget.*
import io.reactivex.android.schedulers.AndroidSchedulers
import io.reactivex.disposables.CompositeDisposable
import io.reactivex.schedulers.Schedulers
import pe.gob.agrorural.sisap.App
import pe.gob.agrorural.sisap.repository.data.DetalleTrabajador
import pe.gob.agrorural.sisap.repository.data.Trabajador
import pe.gob.agrorural.sisap.viewmodel.TrainViewModel
import kotlin.collections.ArrayList
class TrainFragment:Fragment() {
private lateinit var faceRecognitionCamera:CameraBridgeViewBase
private lateinit var mRgba:Mat
private lateinit var mGray:Mat
private lateinit var classifier:CascadeClassifier
private lateinit var faces:MatOfRect
//private lateinit var croppedImage : Mat
private lateinit var images:ArrayList<Mat>
private lateinit var imagesLabels:ArrayList<Int>
private val subscriptions = CompositeDisposable()
private lateinit var recognize:FaceRecognizer
private val trainViewModel : TrainViewModel = App.injectTrainViewModel()
private var mLoaderCallback = object: BaseLoaderCallback(context){
override fun onManagerConnected(status: Int) {
when(status){
LoaderCallbackInterface.SUCCESS ->{
faces = MatOfRect()
faceRecognitionCamera.enableView()
recognize = LBPHFaceRecognizer.create(3, 8, 8, 8, 200.0)
//Borrar todo lo de abajo despues
images = ArrayList<Mat>()
imagesLabels = ArrayList<Int>()
}
else -> super.onManagerConnected(status)
}
}
}
companion object {
fun nuevaInstancia()= TrainFragment()
}
override fun onCreateView(inflater: LayoutInflater, container: ViewGroup?,
savedInstanceState: Bundle?): View {
val view: View = inflater.inflate(R.layout.fragment_train, container, false)
faceRecognitionCamera = view.findViewById(R.id.face_train_java_camera_view) as CameraBridgeViewBase
val trainFace = view.findViewById(R.id.take_picture_floating_action_button) as FloatingActionButton
faceRecognitionCamera.setCameraIndex(CameraBridgeViewBase.CAMERA_ID_FRONT)
faceRecognitionCamera.visibility = SurfaceView.VISIBLE
faceRecognitionCamera.setCvCameraViewListener(object : CameraBridgeViewBase.CvCameraViewListener2{
override fun onCameraViewStarted(width: Int, height: Int) {
mRgba = Mat()
mGray = Mat()
classifier = FileUtils.loadXMLS(context!!,"lbpcascade_frontalface_improved.xml")
}
override fun onCameraViewStopped() {
mRgba.release()
mGray.release()
}
override fun onCameraFrame(inputFrame: CameraBridgeViewBase.CvCameraViewFrame?): Mat {
val mRgbaTmp = inputFrame!!.rgba()
val mGrayTmp = inputFrame.gray()
val orientation = faceRecognitionCamera.screenOrientation
if (faceRecognitionCamera.isEmulator) // Treat emulators as a special case
Core.flip(mRgbaTmp, mRgbaTmp, 1) // Flip along y-axis
else {
when (orientation) { // RGB image
ActivityInfo.SCREEN_ORIENTATION_PORTRAIT, ActivityInfo.SCREEN_ORIENTATION_REVERSE_PORTRAIT -> Core.flip(mRgbaTmp, mRgbaTmp, 0) // Flip along x-axis
ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE, ActivityInfo.SCREEN_ORIENTATION_REVERSE_LANDSCAPE -> Core.flip(mRgbaTmp, mRgbaTmp, 1) // Flip along y-axis
}
when (orientation) { // Grayscale image
ActivityInfo.SCREEN_ORIENTATION_PORTRAIT -> {
Core.transpose(mGrayTmp, mGrayTmp) // Rotate image
Core.flip(mGrayTmp, mGrayTmp, -1) // Flip along both axis
}
ActivityInfo.SCREEN_ORIENTATION_REVERSE_PORTRAIT -> Core.transpose(mGrayTmp, mGrayTmp) // Rotate image
ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE -> Core.flip(mGrayTmp, mGrayTmp, 1) // Flip along y-axis
ActivityInfo.SCREEN_ORIENTATION_REVERSE_LANDSCAPE -> Core.flip(mGrayTmp, mGrayTmp, 0) // Flip along x-axis
}
}
mGray = mGrayTmp
mRgba = mRgbaTmp
Imgproc.resize(mGray, mGray, Size(200.0, 200.0 / (mGray.width().toFloat() / mGray.height().toFloat())))
return mRgba
}
})
trainFace.setOnClickListener{
if(mGray.total()== 0.toLong())
Toast.makeText(context,"No se puede detectar algún rostro", Toast.LENGTH_SHORT).show()
classifier.detectMultiScale(mGray,faces,1.1,3, 0 or Objdetect.CASCADE_SCALE_IMAGE, Size(30.0,30.0))
if(faces.empty()){
Toast.makeText(context,"Rostro desconocido", Toast.LENGTH_SHORT).show()
}else{
if(faces.toArray().size>1){
Toast.makeText(context,"Solo está permitido un rostro por registro", Toast.LENGTH_SHORT).show()
}else{
if(mGray.total()!= 0.toLong()){
//croppedImages(mGray)
showDialog(mGray)
Toast.makeText(context,"Rostro Detectado", Toast.LENGTH_SHORT).show()
}else{
Timber.e("Imagen a escala de grises vacia")
}
}
}
}
return view
}
override fun onPause() {
super.onPause()
faceRecognitionCamera.disableView()
}
override fun onResume() {
super.onResume()
if (!OpenCVLoader.initDebug()) {
Timber.e( "Internal OpenCV library not found. Using OpenCV Manager for initialization")
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, context, mLoaderCallback)
} else {
Timber.e( "OpenCV library found inside package. Using it!")
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS)
}
}
override fun onDestroy() {
super.onDestroy()
faceRecognitionCamera.disableView()
}
private fun trainFaces(trabajador: Trabajador, mat: Mat) {
val croppedImage = croppedImage(mat)
val detalleTrabajador = DetalleTrabajador(null,trabajador.vCodTrabajador,matToBase64String(croppedImage))
subscriptions.add(trainViewModel.registrarLuegoObtenerTodosDetalleTrabajador(detalleTrabajador)
.subscribeOn(Schedulers.io())
.observeOn(Schedulers.io())
.subscribe ({
val matrix = ArrayList<Mat>()
val labels = ArrayList<Int>()
for (detalle in it.detallesTrabajador){
matrix.add(base64StringToMat(detalle.vPuntosRostro))
labels.add(detalle.vCodTrabajador.toInt())
}
val vectorClasses = Mat(labels.size,1,CvType.CV_32SC1)
vectorClasses.put(0,0,labels.toIntArray())
recognize.train(matrix,vectorClasses)
saveImage()
Toast.makeText(context!!,"La imagen ha sido guardada y asignada a ${trabajador.vNombreCompleto}" ,Toast.LENGTH_SHORT).show()
},{
Timber.e(it)
Toast.makeText(context!!,"No se ha podido asignar la imagen al trabajador seleccionado",Toast.LENGTH_SHORT).show()
})
)
}
private fun saveImage():Boolean{
val path = File(Environment.getExternalStorageDirectory(),"TrainedData")
path.mkdirs()
val file = File(path,"lbph_trained_data.xml")
recognize.save(file.toString())
return file.exists()
}
private fun croppedImage(mat: Mat):Mat{
var rectCrop: Rect?=null
for(face in faces.toArray()){
rectCrop = Rect(face.x,face.y,face.width,face.height)
}
return Mat(mat,rectCrop)
}
private fun matToBase64String(mat: Mat):String{
val data = ByteArray((mat.total() * mat.channels()).toInt())
mat.get(0,0,data)
return String(android.util.Base64.encode(data, android.util.Base64.DEFAULT))
}
private fun base64StringToMat(base64String: String):Mat{
val data = android.util.Base64.decode(base64String, android.util.Base64.DEFAULT)
val mat = Mat(data.size, 1, CvType.CV_8U)
mat.put(0, 0, data)
return mat
}
private fun showDialog(mat: Mat) {
val view : View = View.inflate(context!!,R.layout.dialog_custom_employee_train_face, null)
val trabajadorAutocompleteTextView = view.findViewById(R.id.trabajadorAutocompleteTextView) as AutoCompleteTextView
var empleadoSeleccionado : Trabajador? = null
subscriptions.add(trainViewModel.obtenerTrabajadores()
.subscribeOn(Schedulers.newThread())
.observeOn(AndroidSchedulers.mainThread())
.subscribe ({
trabajadorAutocompleteTextView.setAdapter(ArrayAdapter<Trabajador>(context!!, android.R.layout.simple_dropdown_item_1line,it.trabajadores))
trabajadorAutocompleteTextView.setOnItemClickListener { parent, _, position, _ ->
empleadoSeleccionado = parent.adapter.getItem(position) as Trabajador
Timber.e("Trabajador seleccionado -> $empleadoSeleccionado")
}
val builder = AlertDialog.Builder(context!!)
.setTitle("Asignar rostro a trabajador")
.setView(view)
.setPositiveButton("Guardar",null)// Set up positive button, but do not provide a listener, so we can check the string before dismissing the dialog
.setNegativeButton("Cancelar") {dialog, _->
dialog.dismiss()
}
.setCancelable(false)
val dialog = builder.create()
dialog.setOnShowListener {
val button= (it as AlertDialog).getButton(AlertDialog.BUTTON_POSITIVE)
button.setOnClickListener {
if(empleadoSeleccionado==null){
trabajadorAutocompleteTextView.error = "Debe seleccionar su Documento de Identidad"
}else{
//subscriptions.clear()
trainFaces(empleadoSeleccionado!!,mat)
dialog.dismiss()
}
}
}
dialog!!.window!!.setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_VISIBLE);
dialog.show()
},{
Timber.e(it)
})
)
}
}
Проблема в том, что первый наблюдатель подписан правильно, но по какой-то причине второй наблюдатель (который являетсяв функции TrainFaces) никогда не подписывается.Я думаю, что проблема в том, как я подписываюсь на наблюдателя.но я не нашел случаев, похожих на мои, когда искал решение.Пожалуйста помоги!и заранее спасибо.