subsection ‹Layer-based Modelling using List Types (\thy)›
theory Grid_Layers_List
  imports
    NN_Layers_List_Main
    Grid_Layers
begin
declare [[nn_proof_mode = "eval"]]

import_TensorFlow grid file "model/trained-model_binary-step_linear/model.json"
  as seq_layer_list

declare [[nn_proof_mode = "nbe"]]
text ‹
Our new Isabelle/Isar command @{command "import_TensorFlow"} encodes the neural network model
stored in the file model.json as a sequence of layers, i.e., in the formal encoding we developed.
Our datatype package also proves that the imported model complies with the requirements of our
formal model, and it derives various auxiliary properties (e.g., conversions between different
representations) that are useful during interactive verification.›
import_data_file "model/trained-model_binary-step_linear/input_small.txt"
defining inputs_small
import_data_file "model/trained-model_binary-step_linear/expectations_small.txt"
defining expectations_small
import_data_file "model/trained-model_binary-step_linear/input.txt"
defining inputs
import_data_file "model/trained-model_binary-step_linear/expectations.txt"
defining expectations
import_data_file "model/trained-model_binary-step_linear/predictions.txt"
defining predictions
text ‹To ensure that our formalisation is a faithful representation of the neural
networks that we defined in TensorFlow, we provide a framework that supports
the import of trained TensorFlow networks and their test data. We can then
use this to evaluate our Isabelle network and validate that the output is the
same, hence providing confidence that our formalisation is accurate.
We can import text files containing NumPy arrays of our test inputs,
expectations and predictions from our trained TensorFlow network.›
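text ‹As an illustrative sketch of this validation step (the use of @{command value} and the
accessors below are merely one possible way of inspecting the imported data; they are not part
of the import machinery itself), we can evaluate the formalised network on the first imported
input and compare the result, up to the import precision, with the first imported prediction:›

(* Sketch only: the first evaluation should agree with the first imported prediction. *)
value ‹predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet (hd inputs)›
value ‹hd predictions›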
thm grid.Layers_def
thm grid.Layers.dense_input_def
thm grid.Layers.OUTPUT_def
thm grid.layer_defs
thm grid.φ_grid.simps
thm grid.NeuralNet_def
thm inputs_def
thm predictions_def
lemmas grid_defs = grid.Layers_def grid.layer_defs grid.NeuralNet_def
lemmas activation_defs = identity_def binary_step_def
lemma grid_closed [simp]:
  ‹predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet xs = (case xs of
     [x3, x2, x1, x0] ⇒
       let y = 2 * (x3 + (x2 * 2 + (x1 * 4 + x0 * 8)))
       in Some (
         [(if y - 7 ≤ 0 then 0 else 1) +
            ((if y - 11 ≤ 0 then 0 else 1) + ((if y - 21 ≤ 0 then 0 else 1)
              + ((if y - 25 ≤ 0 then 0 else 1) - (if y - 23 ≤ 0 then 0 else 1))
              - (if y - 19 ≤ 0 then 0 else 1)) - (if y - 9 ≤ 0 then 0 else 1))
            - (if y - 5 ≤ 0 then 0 else 1) + 1,
          (if y - 5 ≤ 0 then 0 else 1) + ((if y - 23 ≤ 0 then 0 else 1)
            - (if y - 25 ≤ 0 then 0 else 1) - (if y - 7 ≤ 0 then 0 else 1)),
          (if y - 9 ≤ 0 then 0 else 1) + ((if y - 19 ≤ 0 then 0 else 1)
            - (if y - 21 ≤ 0 then 0 else 1) - (if y - 11 ≤ 0 then 0 else 1))])
   | _ ⇒ None)›
  by (auto simp add: grid_defs predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l_def activation_defs Let_def split: list.split)
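text ‹As a quick sanity check of this closed form (a sketch; the concrete input and the lemma
name are ours), instantiating xs = [1, 1, 0, 0] yields y = 2 * (1 + 1 * 2 + 0 * 4 + 0 * 8) = 6,
so only the test y - 5 > 0 succeeds and the network assigns the second class:›

lemma grid_closed_example:
  ‹predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet [1, 1, 0, 0] = Some [0, 1, 0]›
  by (simp add: Let_def)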
lemma grid_img_defined: ‹((predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet xs) ≠ None) = (length xs = 4)›
  by (auto simp add: Let_def split: list.split)

lemma grid_img_defined': ‹(∃y. (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet xs) = Some y) = (length xs = 4)›
  using grid_img_defined by auto
lemma grid_image:
  assumes ‹(predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet xs) ≠ None›
  shows ‹the (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet xs) ∈ {[0, 0, 1], [0, 1, 0], [1, 0, 0]}›
  using assms grid_img_defined apply simp
  by (auto simp add: Let_def split: list.split)
lemma grid_image_approx:
  ‹ran (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet) ⊆ {[0, 0, 1], [0, 1, 0], [1, 0, 0]}›
  apply (simp only: ran_def)
  using grid_image by force
text ‹The lemma @{thm [source] "grid_image_approx"} shows that the output of the classification
is never ambiguous, i.e., it is never the case that two or more classification outputs have the
value 1.›
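text ‹The following lemma makes this explicit (a sketch; the lemma name and formulation are
ours): every output in the range of the classifier is a one-hot vector, i.e., its components
sum to exactly 1.›

lemma grid_one_hot:
  assumes ‹ys ∈ ran (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet)›
  shows ‹sum_list ys = 1›
  using assms grid_image_approx by auto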
lemma grid_dom: ‹dom (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet) = {a. length a = 4}›
  by (simp add: grid_defs predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l_def activation_defs dom_def)
definition "range_of x = (if x = (0::real) then {0..0.04::real} else {0.96..1})"
lemma ‹x3 ∈ {0.96..1.00} ∧ x2 ∈ {0.96..1.00} ∧ x1 ∈ {0.00..0.04} ∧ x0 ∈ {0.00..0.04}
       ⟹ predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet [x3, x2, x1, x0] = Some [0, 1, 0]›
  by (simp add: Let_def)

lemma ‹x3 ∈ {0.00..0.04} ∧ x2 ∈ {0.00..0.04} ∧ x1 ∈ {0.96..1.00} ∧ x0 ∈ {0.96..1.00}
       ⟹ predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet [x3, x2, x1, x0] = Some [0, 1, 0]›
  by (simp add: Let_def)

lemma ‹x3 ∈ {0.95..1.00} ∧ x2 ∈ {0.00..0.05} ∧ x1 ∈ {0.95..1.00} ∧ x0 ∈ {0.00..0.05}
       ⟹ predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet [x3, x2, x1, x0] = Some [0, 0, 1]›
  by (simp add: Let_def)

lemma ‹x3 ∈ {0.00..0.1} ∧ x2 ∈ {0.96..1.00} ∧ x1 ∈ {0.00..0.1} ∧ x0 ∈ {0.96..1.00}
       ⟹ predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet [x3, x2, x1, x0] = Some [0, 0, 1]›
  by (simp add: Let_def)
text ‹A common notion of safety for neural networks is the requirement that small changes to an
input must not change its classification. For the grid example, we express such verification
goals as shown above: we set a small noise bound on each input component and verify that the
output classification remains constant.›
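text ‹For completeness, the remaining class can be covered analogously (a sketch; the concrete
bounds are ours): if all four inputs stay within 0.04 of 0, then
y = 2 * (x3 + 2 * x2 + 4 * x1 + 8 * x0) is at most 2 * 0.04 * 15 = 1.2 < 5, so no threshold
fires and the classification is [1, 0, 0].›

lemma ‹x3 ∈ {0.00..0.04} ∧ x2 ∈ {0.00..0.04} ∧ x1 ∈ {0.00..0.04} ∧ x0 ∈ {0.00..0.04}
       ⟹ predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet [x3, x2, x1, x0] = Some [1, 0, 0]›
  by (simp add: Let_def)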
lemma grid_meets_predictions:
  ‹⊨⇩i⇩l {inputs} (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet) {intervals_of_l 0.000001 predictions}›
  by (simp add: ensure_testdata_interval_list_def upper_Interval lower_Interval predictions_def
                intervals_of_l_def inputs_def in_set_interval)
lemma grid_meets_expectations_max_classifier:
  ‹⊨⇩l {inputs_small} (predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet) {expectations_small}›
  by (simp add: ensure_testdata_max_list_def expectations_small_def inputs_small_def)
lemma grid_min_delta_classifier:
  ‹1.0 ⊨ predict⇩s⇩e⇩q⇩_⇩l⇩a⇩y⇩e⇩r⇩_⇩l grid.NeuralNet›
  unfolding ensure_delta_min_def Prediction_Utils.δ⇩m⇩i⇩n_def max⇩l⇩i⇩s⇩t_def
  using grid_image_approx by auto
text ‹The lemmas @{thm [source] "grid_meets_predictions"},
@{thm [source] "grid_meets_expectations_max_classifier"}, and
@{thm [source] "grid_min_delta_classifier"} show that our definition of the grid neural network
computes the same predictions as the trained TensorFlow network.›
end