SOL4Py Sample: InceptionV3Classifier

SOL4Py Samples













#******************************************************************************
#
#  Copyright (c) 2018-2019 Antillia.com TOSHIYUKI ARAI. ALL RIGHTS RESERVED.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#******************************************************************************

 
# 2019/03/05

# Inception V3 model for Keras.
# See: https://github.com/keras-team/keras-applications/blob/master/keras_applications/inception_v3.py

#  InceptionV3Classifier.py

# encoding: utf-8

import sys
import os
import time
import traceback
import pandas as pd
import seaborn as sns

import matplotlib.pyplot as plt
import numpy as np

from keras.utils import np_utils
from keras.preprocessing.image import image, load_img, img_to_array
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input, decode_predictions
 
sys.path.append('../../')

from SOL4Py.ZApplicationView import *
from SOL4Py.ZLabeledComboBox import *
from SOL4Py.ZPushButton      import *
from SOL4Py.ZVerticalPane    import *
from SOL4Py.ZPILImageCropper import *
 
from SOL4Py.ZScrolledPlottingArea import *
from SOL4Py.ZScalableScrolledImageView import *
from SOL4Py.ZTabbedWindow import *

DATASET_InceptionV3 = 0

############################################################
# Classifier View

class MainView(ZApplicationView):
  """Classifier view that runs a pretrained Keras InceptionV3 (ImageNet) model
  on a user-selected image file and shows the top-5 predictions."""
  # Class variables

  # ClassifierView Constructor
  def __init__(self, title, x, y, width, height):
    """Build the GUI (combobox dock, text editor, tabbed image views) and load the model.

    Parameters: title - window title; x, y - window position; width, height - window size.
    """
    super(MainView, self).__init__(title, x, y, width, height)
    self.font        = QFont("Arial", 10)
    self.setFont(self.font)

    self.model_loaded = False

    # keras.preprocessing.image: Pillow image loaded from the cropped file.
    self.image       = None
    # Preprocessed numpy input for the model; stays None until a file loads successfully,
    # so file_open can safely test it before enabling the Classify button.
    self.array_image = None

    # 1 Add a labeled combobox to top dock area.
    self.add_datasets_combobox()

    # 2 Add a textedit to the left pane of the center area.
    self.text_editor = QTextEdit()
    self.text_editor.setLineWrapColumnOrWidth(600)
    self.text_editor.setLineWrapMode(QTextEdit.FixedPixelWidth)
    # Integer division: Qt geometry APIs require int coordinates under Python 3.
    self.text_editor.setGeometry(0, 0, width // 2, height)

    # 3 Add a tabbed_window to the right pane of the center area.
    self.tabbed_window = ZTabbedWindow(self, 0, 0, width // 2, height)

    # 4 Add an imageview to the tabbed_window.
    self.image_view = ZScalableScrolledImageView(self, 0, 0, width // 3, height // 3)
    self.tabbed_window.add("SourceImage", self.image_view)

    # 5 Add a test_imageview to the tabbed_window.
    self.test_image_view = ZScalableScrolledImageView(self, 0, 0, width // 3, height // 3)
    self.tabbed_window.add("TestImage", self.test_image_view)

    self.add(self.text_editor)
    self.add(self.tabbed_window)

    self.show()
    # 6 Load the trained model (downloads the ImageNet weights on first use).
    self.write("Loading InceptionV3 Model")
    self.model = InceptionV3(weights='imagenet')
    self.write("Loaded InceptionV3")
    self.model_loaded = True


  def write(self, text):
    """Append text to the editor and repaint immediately so progress is visible
    while long operations (model load, predict) block the event loop."""
    self.text_editor.append(text)
    self.text_editor.repaint()


  def add_datasets_combobox(self):
    """Create the top-dock controls: dataset combobox, Classify and Clear buttons."""
    self.dataset_id = DATASET_InceptionV3
    self.datasets_combobox = ZLabeledComboBox(self, "Datasets", Qt.Horizontal)
    self.datasets_combobox.setFont(self.font)

    # We use the following datasets.
    self.datasets = {"InceptionV3": DATASET_InceptionV3}
    title = self.get_title()

    self.setWindowTitle(self.__class__.__name__ + " - " + title)

    self.datasets_combobox.add_items(self.datasets.keys())
    self.datasets_combobox.set_current_text(self.dataset_id)

    # Classify stays disabled until an image has been loaded and preprocessed.
    self.classifier_button = ZPushButton("Classify", self)
    self.classifier_button.setEnabled(False)

    self.clear_button = ZPushButton("Clear", self)

    self.classifier_button.add_activated_callback(self.classifier_button_activated)
    self.clear_button.add_activated_callback(self.clear_button_activated)

    self.datasets_combobox.add(self.classifier_button)
    self.datasets_combobox.add(self.clear_button)

    self.set_top_dock(self.datasets_combobox)


  # Show FileOpenDialog and select an image file.
  def file_open(self):
    """Open an image file chosen by the user; enable Classify once input is ready."""
    if self.model_loaded:
      options = QFileDialog.Options()
      filename, _ = QFileDialog.getOpenFileName(self, "FileOpenDialog", "",
                     "All Files (*);;Image Files (*.png;*jpg;*.jpeg)", options=options)
      if filename:
        self.load_file(filename)
      # Plain None test: the previous `.all() != None` raised AttributeError before
      # any file was loaded and misused numpy truthiness.
      if self.array_image is not None:
        self.classifier_button.setEnabled(True)
    else:
      QMessageBox.warning(self, "Error",
           "InceptionV3: Model is not loaded.")


  def load_file(self, filename):
    """Crop, display, and preprocess the image at filename into self.array_image."""
    try:
      image_cropper = ZPILImageCropper(filename)
      cropped_file = "./~temp_cropped.png"
      # 1 Crop maximum square region from the filename and save it as a cropped_file.
      image_cropper.crop_maximum_square_region(cropped_file)

      # 2 Load an image from the cropped_file into the source image view.
      self.image_view.load_image(cropped_file)
      self.set_filenamed_title(filename)

      # 3 Load an image from the cropped_file as Pillow image format.
      # InceptionV3 with include_top=True expects 299x299 input (not 224x224).
      self.image = load_img(cropped_file, target_size=(299, 299))

      # 4 Convert the self.image to numpy ndarray.
      self.ndarray = img_to_array(self.image)

      # 5 Set self.ndarray to the test_image_view.
      self.test_image_view.set_image(self.ndarray)

      # 6 Convert self.ndarray to the input format expected by the InceptionV3 model.
      self.array_image = preprocess_input(self.ndarray)

      os.remove(cropped_file)

    except Exception:
      # Report the failure in the text editor instead of crashing the GUI.
      self.write(formatted_traceback())


  def classifier_button_activated(self, text):
    """Run classification; buttons are disabled during the run and always restored."""
    self.classifier_button.setEnabled(False)
    self.clear_button.setEnabled(False)
    try:
      self.classify()
    except Exception:
      self.write(formatted_traceback())
    finally:
      # Restore the buttons even if classify() raised.
      self.classifier_button.setEnabled(True)
      self.clear_button.setEnabled(True)


  def classify(self):
    """Predict on the preprocessed image and print the top-5 ImageNet labels."""
    self.write("classify start")
    # Add the batch dimension: (299, 299, 3) -> (1, 299, 299, 3).
    self.array_input = np.stack([self.array_image])

    predictions = self.model.predict(self.array_input)
    results = decode_predictions(predictions, top=5)[0]
    for result in results:
      self.write(str(result))

    self.write("classify end")


  def clear_button_activated(self, text):
    """Clear the text editor."""
    self.text_editor.setText("")


############################################################
#    
if main(__name__):
  # Script entry point: create the Qt application and the classifier main view.
  try:
    app_name  = os.path.basename(sys.argv[0])
    applet    = QApplication(sys.argv)

    main_view = MainView(app_name, 40, 40, 900, 500)
    main_view.show()

    applet.exec_()

  except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still terminate.
    traceback.print_exc()
    

Last modified: 28 Apr. 2019