diff --git a/lib/Word_Lib/Word_Lemmas_Internal.thy b/lib/Word_Lib/Word_Lemmas_Internal.thy index 8e57f22407..a136098823 100644 --- a/lib/Word_Lib/Word_Lemmas_Internal.thy +++ b/lib/Word_Lib/Word_Lemmas_Internal.thy @@ -933,6 +933,11 @@ lemma shiftr_not_max_word: "0 < n \ w >> n \ max_word" by (metis and_mask_eq_iff_shiftr_0 and_mask_not_max_word diff_less len_gt_0 shiftr_le_0 word_shiftr_lt) +lemma shiftr_less_max_mask: + "0 < n \ x >> n < mask LENGTH('a)" for x :: "'a::len word" + using not_max_word_iff_less shiftr_not_max_word + by auto + lemma word_sandwich1: fixes a b c :: "'a::len word" assumes "a < b" diff --git a/proof/crefine/AARCH64/Arch_C.thy b/proof/crefine/AARCH64/Arch_C.thy index e3481189f9..8b7b1c02da 100644 --- a/proof/crefine/AARCH64/Arch_C.thy +++ b/proof/crefine/AARCH64/Arch_C.thy @@ -92,7 +92,7 @@ lemma clearMemory_PT_setObject_PTE_ccorres: apply (clarsimp simp: guard_is_UNIV_def bit_simps split: if_split) apply clarsimp apply (frule is_aligned_addrFromPPtr_n, simp) - apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineSize) + apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineBits) apply (rule conjI) apply (simp add: unat_mask_eq flip: mask_2pm1) apply (simp add: mask_eq_exp_minus_1) @@ -1604,7 +1604,7 @@ definition flushtype_relation :: "flush_type \ machine_word \ scast ` {Kernel_C.ARMPageUnify_Instruction, Kernel_C.ARMVSpaceUnify_Instruction}" lemma doFlush_ccorres: - "ccorres dc xfdc (\s. vs \ ve \ ps \ ps + (ve - vs) \ vs && mask cacheLineSize = ps && mask cacheLineSize + "ccorres dc xfdc (\s. vs \ ve \ ps \ ps + (ve - vs) \ vs && mask cacheLineBits = ps && mask cacheLineBits \ ptrFromPAddr ps \ ptrFromPAddr ps + (ve - vs) \ unat (ve - vs) \ gsMaxObjectSize s) (\flushtype_relation t \invLabel\ \ \\start = vs\ \ \\end = ve\ \ \\pstart = ps\) [] diff --git a/proof/crefine/AARCH64/Invoke_C.thy b/proof/crefine/AARCH64/Invoke_C.thy index e74e29273c..c688ea43f9 100644 --- a/proof/crefine/AARCH64/Invoke_C.thy +++ b/proof/crefine/AARCH64/Invoke_C.thy @@ -1685,7 +1685,7 @@ lemma clearMemory_untyped_ccorres: word_of_nat_less Kernel_Config.resetChunkBits_def word_bits_def unat_2p_sub_1) apply (strengthen is_aligned_no_wrap'[where sz=sz] is_aligned_addrFromPPtr_n)+ - apply (simp add: addrFromPPtr_mask_cacheLineSize pptrBaseOffset_alignment_def) + apply (simp add: addrFromPPtr_mask_cacheLineBits pptrBaseOffset_alignment_def) apply (cases "ptr = 0"; simp) apply (drule subsetD, rule intvl_self, simp) apply simp diff --git a/proof/crefine/AARCH64/Machine_C.thy b/proof/crefine/AARCH64/Machine_C.thy index 1e02ad82ad..f741dc55f6 100644 --- a/proof/crefine/AARCH64/Machine_C.thy +++ b/proof/crefine/AARCH64/Machine_C.thy @@ -156,7 +156,7 @@ assumes cleanByVA_PoU_ccorres: assumes cleanCacheRange_RAM_ccorres: "ccorres dc xfdc (\s. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask cacheLineSize = w3 && mask cacheLineSize + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits \ unat (w2 - w1) \ gsMaxObjectSize s) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_RAM w1 w2 w3)) @@ -165,7 +165,7 @@ assumes cleanCacheRange_RAM_ccorres: assumes cleanCacheRange_PoU_ccorres: "ccorres dc xfdc (\s. 
unat (w2 - w1) \ gsMaxObjectSize s \ w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask cacheLineSize = w3 && mask cacheLineSize) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_PoU w1 w2 w3)) (Call cleanCacheRange_PoU_'proc)" @@ -173,7 +173,7 @@ assumes cleanCacheRange_PoU_ccorres: assumes cleanInvalidateCacheRange_RAM_ccorres: "ccorres dc xfdc (\s. unat (w2 - w1) \ gsMaxObjectSize s \ w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask cacheLineSize = w3 && mask cacheLineSize) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanInvalidateCacheRange_RAM w1 w2 w3)) (Call cleanInvalidateCacheRange_RAM_'proc)" @@ -181,14 +181,14 @@ assumes cleanInvalidateCacheRange_RAM_ccorres: assumes invalidateCacheRange_RAM_ccorres: "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask cacheLineSize = w3 && mask cacheLineSize)) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits)) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (invalidateCacheRange_RAM w1 w2 w3)) (Call invalidateCacheRange_RAM_'proc)" assumes invalidateCacheRange_I_ccorres: "ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask cacheLineSize = w3 && mask cacheLineSize) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (invalidateCacheRange_I w1 w2 w3)) (Call invalidateCacheRange_I_'proc)" diff --git a/proof/crefine/AARCH64/Recycle_C.thy b/proof/crefine/AARCH64/Recycle_C.thy index 98055bab5b..d2fbd95eb2 100644 --- a/proof/crefine/AARCH64/Recycle_C.thy +++ b/proof/crefine/AARCH64/Recycle_C.thy @@ -387,13 +387,7 @@ lemma clearMemory_PageCap_ccorres: capAligned_def word_of_nat_less) apply (frule is_aligned_addrFromPPtr_n, simp add: pageBitsForSize_def split: vmpage_size.splits) apply (simp add: bit_simps pptrBaseOffset_alignment_def)+ - apply (simp add: is_aligned_no_overflow') - apply (rule conjI) - subgoal - apply (prop_tac "cacheLineSize \ pageBitsForSize sz") - apply (simp add: pageBitsForSize_def bit_simps cacheLineSize_def split: vmpage_size.splits) - apply (simp add: is_aligned_mask[THEN iffD1] is_aligned_weaken) - done + apply (simp add: is_aligned_no_overflow' addrFromPPtr_mask_cacheLineBits) apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.splits) done diff --git a/proof/crefine/AARCH64/Wellformed_C.thy b/proof/crefine/AARCH64/Wellformed_C.thy index 76e236b963..26a9aee3b1 100644 --- a/proof/crefine/AARCH64/Wellformed_C.thy +++ b/proof/crefine/AARCH64/Wellformed_C.thy @@ -562,6 +562,50 @@ lemma ucast_irq_array_guard[unfolded irq_array_size_val, simplified]: done +text \cacheLineBits interface\ + +lemmas cacheLineBits_val = + cacheLineBits_def[unfolded Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def] + +lemma cacheLineBits_leq_pageBits: + "cacheLineBits \ pageBits" + using cacheLineBits_sanity + by (simp add: pageBits_def) + +lemma pageBits_leq_pptrBaseOffset_alignment: + "pageBits \ pptrBaseOffset_alignment" + by (simp add: pageBits_def pptrBaseOffset_alignment_def) + +lemma cacheLineBits_leq_pptrBaseOffset_alignment: + "cacheLineBits \ pptrBaseOffset_alignment" + by (rule order.trans, rule cacheLineBits_leq_pageBits, rule pageBits_leq_pptrBaseOffset_alignment) + +lemma cacheLineBits_leq_pbfs: + "cacheLineBits \ pageBitsForSize sz" + by (rule order.trans, rule cacheLineBits_leq_pageBits, rule 
pbfs_atleast_pageBits) + +lemma addrFromPPtr_mask_pptrBaseOffset_alignment: + "n \ pptrBaseOffset_alignment + \ addrFromPPtr ptr && mask n = ptr && mask n" + unfolding addrFromPPtr_def + by (metis is_aligned_weaken mask_add_aligned pptrBaseOffset_aligned zadd_diff_inverse) + +lemma addrFromPPtr_mask_cacheLineBits: + "addrFromPPtr ptr && mask cacheLineBits = ptr && mask cacheLineBits" + by (rule addrFromPPtr_mask_pptrBaseOffset_alignment, + rule cacheLineBits_leq_pptrBaseOffset_alignment) + +lemma pptrBaseOffset_cacheLineBits_aligned[simp]: + "pptrBaseOffset && mask cacheLineBits = 0" + unfolding is_aligned_mask[symmetric] + by (rule is_aligned_weaken[OF pptrBaseOffset_aligned cacheLineBits_leq_pptrBaseOffset_alignment]) + +lemma ptrFromPAddr_mask_cacheLineBits[simp]: + "ptrFromPAddr v && mask cacheLineBits = v && mask cacheLineBits" + by (simp add: ptrFromPAddr_def add_mask_ignore) + +(* end of Kernel_Config interface section *) + (* Input abbreviations for API object types *) (* disambiguates names *) @@ -645,26 +689,6 @@ abbreviation(input) where "prioInvalid == seL4_InvalidPrio" -(* caches *) - -definition cacheLineSize :: nat where - "cacheLineSize \ 6" - -lemma addrFromPPtr_mask_cacheLineSize: - "addrFromPPtr ptr && mask cacheLineSize = ptr && mask cacheLineSize" - apply (simp add: addrFromPPtr_def AARCH64.pptrBase_def pptrBaseOffset_def canonical_bit_def - paddrBase_def cacheLineSize_def mask_def) - apply word_bitwise - done - -lemma pptrBaseOffset_cacheLineSize_aligned[simp]: - "pptrBaseOffset && mask cacheLineSize = 0" - by (simp add: pptrBaseOffset_def paddrBase_def pptrBase_def cacheLineSize_def mask_def) - -lemma ptrFromPAddr_mask_cacheLineSize[simp]: - "ptrFromPAddr v && mask cacheLineSize = v && mask cacheLineSize" - by (simp add: ptrFromPAddr_def add_mask_ignore) - (* The magic 4 comes out of the bitfield generator -- this applies to all versions of the kernel. *) lemma ThreadState_Restart_mask[simp]: "(scast ThreadState_Restart::machine_word) && mask 4 = scast ThreadState_Restart" diff --git a/proof/crefine/ARM/ArchMove_C.thy b/proof/crefine/ARM/ArchMove_C.thy index b692ea8857..fb22ca37cb 100644 --- a/proof/crefine/ARM/ArchMove_C.thy +++ b/proof/crefine/ARM/ArchMove_C.thy @@ -233,7 +233,7 @@ crunch insertNewCap, Arch_createNewCaps, threadSet, Arch.createObject, setThread simp: unless_def updateObject_default_def crunch_simps ignore_del: preemptionPoint) -lemma addrFromPPtr_mask[simplified ARM.pageBitsForSize_simps]: +lemma addrFromPPtr_mask_ARMSuperSection: "n \ pageBitsForSize ARMSuperSection \ addrFromPPtr ptr && mask n = ptr && mask n" apply (simp add: addrFromPPtr_def) @@ -242,13 +242,6 @@ lemma addrFromPPtr_mask[simplified ARM.pageBitsForSize_simps]: apply (simp flip: mask_eqs(8)) done -(* this could be done as - lemmas addrFromPPtr_mask_5 = addrFromPPtr_mask[where n=5, simplified] - but that wouldn't give a sanity check of the n \ ... 
assumption disappearing *) -lemma addrFromPPtr_mask_5: - "addrFromPPtr ptr && mask 5 = ptr && mask 5" - by (rule addrFromPPtr_mask[where n=5, simplified]) - end end diff --git a/proof/crefine/ARM/Arch_C.thy b/proof/crefine/ARM/Arch_C.thy index 31a8f5bb37..167279e00d 100644 --- a/proof/crefine/ARM/Arch_C.thy +++ b/proof/crefine/ARM/Arch_C.thy @@ -1598,9 +1598,8 @@ lemma performPageInvocationMapPTE_ccorres: apply simp apply (subst is_aligned_no_wrap', assumption, fastforce) apply (subst add_diff_eq [symmetric], subst is_aligned_no_wrap', assumption, fastforce) - apply (simp add:addrFromPPtr_mask_5) - apply (clarsimp simp:pte_range_relation_def ptr_add_def ptr_range_to_list_def - addrFromPPtr_mask_5) + apply simp + apply (clarsimp simp: pte_range_relation_def ptr_add_def ptr_range_to_list_def) apply (auto simp: valid_pte_slots'2_def upt_conv_Cons[where i=0])[1] apply (clarsimp simp: guard_is_UNIV_def hd_conv_nth last_conv_nth ucast_minus) apply (clarsimp simp: pte_range_relation_def ptr_range_to_list_def objBits_simps archObjSize_def) @@ -1848,9 +1847,9 @@ lemma performPageInvocationMapPDE_ccorres: apply (simp add: hd_conv_nth last_conv_nth) apply (rule conj_assoc[where Q="a \ b" for a b, THEN iffD1])+ apply (rule conjI) - (* the inequality first *) - apply (clarsimp simp:valid_pde_slots'2_def pdeBits_def - objBits_simps archObjSize_def hd_conv_nth) + (* the inequality first *) + apply (clarsimp simp: valid_pde_slots'2_def pdeBits_def + objBits_simps archObjSize_def hd_conv_nth) apply (clarsimp simp:pde_range_relation_def ptr_range_to_list_def ptr_add_def) apply (frule is_aligned_addrFromPPtr_n,simp) apply (cut_tac n = "sz+2" in power_not_zero[where 'a="32"]) @@ -1858,9 +1857,9 @@ lemma performPageInvocationMapPDE_ccorres: apply (subst is_aligned_no_wrap', assumption, fastforce) apply (subst add_diff_eq [symmetric]) apply (subst is_aligned_no_wrap', assumption, fastforce) - apply (simp add:addrFromPPtr_mask_5) + apply simp apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def CTypesDefs.ptr_add_def - valid_pde_slots'2_def addrFromPPtr_mask_5) + valid_pde_slots'2_def) apply (auto simp: upt_conv_Cons[where i=0])[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem hd_conv_nth last_conv_nth) apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def pdeBits_def) @@ -2787,10 +2786,9 @@ lemma decodeARMFrameInvocation_ccorres: erule is_aligned_no_wrap', clarsimp\ | solves \frule vmsz_aligned_addrFromPPtr(3)[THEN iffD2], (subst mask_add_aligned mask_add_aligned_right, erule is_aligned_weaken, - rule order_trans[OF _ pbfs_atleast_pageBits[simplified pageBits_def]], simp)+, + rule cacheLineBits_leq_pbfs)+, simp\)+)[1] (* 20s *) done - (* C side *) apply (clarsimp simp: rf_sr_ksCurThread ThreadState_defs mask_eq_iff_w2p word_size word_less_nat_alt from_bool_0 excaps_map_def cte_wp_at_ctes_of) @@ -3146,13 +3144,12 @@ lemma decodeARMPageDirectoryInvocation_ccorres: apply (simp add:linorder_not_le) apply (erule word_less_sub_1) apply (simp add:mask_add_aligned mask_twice) - apply (subgoal_tac "5 \ pageBitsForSize a") - apply (frule(1) is_aligned_weaken) - apply (simp add:mask_add_aligned mask_twice) - apply (erule order_trans[rotated]) - apply (erule flush_range_le1, simp add: linorder_not_le) - apply (erule word_less_sub_1) - apply (case_tac a,simp+)[1] + apply (cut_tac cacheLineBits_leq_pbfs) + apply (frule(1) is_aligned_weaken) + apply (simp add:mask_add_aligned mask_twice) + apply (erule order_trans[rotated]) + apply (erule flush_range_le1, simp add: linorder_not_le) + 
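+    (* note: cacheLineBits_leq_pbfs generalises the hard-coded bound "5 \ pageBitsForSize a"
+       that the removed script established by case analysis on the page size, so this step no
+       longer depends on the concrete CONFIG_L1_CACHE_LINE_SIZE_BITS value *)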
apply (erule word_less_sub_1) apply simp apply (vcg exspec=resolveVAddr_modifies) apply (rule_tac P'="{s. errstate s = find_ret}" diff --git a/proof/crefine/ARM/Invoke_C.thy b/proof/crefine/ARM/Invoke_C.thy index 524f811aeb..6489c5f838 100644 --- a/proof/crefine/ARM/Invoke_C.thy +++ b/proof/crefine/ARM/Invoke_C.thy @@ -1555,7 +1555,7 @@ lemma clearMemory_untyped_ccorres: word_of_nat_less Kernel_Config.resetChunkBits_def word_bits_def unat_2p_sub_1) apply (strengthen is_aligned_no_wrap'[where sz=sz] is_aligned_addrFromPPtr_n)+ - apply (simp add: addrFromPPtr_mask) + apply simp apply (cases "ptr = 0") apply (drule subsetD, rule intvl_self, simp) apply (simp split: if_split_asm) diff --git a/proof/crefine/ARM/Machine_C.thy b/proof/crefine/ARM/Machine_C.thy index f77622bbb7..1dccec1e1d 100644 --- a/proof/crefine/ARM/Machine_C.thy +++ b/proof/crefine/ARM/Machine_C.thy @@ -260,33 +260,33 @@ lemma index_xf_for_sequence: lemma lineStart_le_mono: "x \ y \ lineStart x \ lineStart y" - by (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 neg_mask_mono_le) + by (clarsimp simp: lineStart_def shiftr_shiftl1 neg_mask_mono_le) lemma lineStart_sub: - "\ x && mask 5 = y && mask 5\ \ lineStart (x - y) = lineStart x - lineStart y" - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1) + "\ x && mask cacheLineBits = y && mask cacheLineBits\ \ lineStart (x - y) = lineStart x - lineStart y" + apply (clarsimp simp: lineStart_def shiftr_shiftl1) apply (clarsimp simp: mask_out_sub_mask) apply (clarsimp simp: mask_eqs(8)[symmetric]) done lemma lineStart_mask: - "lineStart x && mask 5 = 0" - by (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_AND_NOT_mask) + "lineStart x && mask cacheLineBits = 0" + by (clarsimp simp: lineStart_def shiftr_shiftl1 mask_AND_NOT_mask) lemma cachRangeOp_corres_helper: - "\w1 \ w2; w3 \ w3 + (w2 - w1); w1 && mask 5 = w3 && mask 5\ - \ unat (lineStart w2 - lineStart w1) div 32 = - unat (lineStart (w3 + (w2 - w1)) - lineStart w3) div 32" + "\w1 \ w2; w3 \ w3 + (w2 - w1); w1 && mask cacheLineBits = w3 && mask cacheLineBits\ + \ unat (lineStart w2 - lineStart w1) div (2^cacheLineBits) = + unat (lineStart (w3 + (w2 - w1)) - lineStart w3) div (2^cacheLineBits)" apply (subst dvd_div_div_eq_mult, simp) - apply (clarsimp simp: and_mask_dvd_nat[where n=5, simplified]) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1) + apply (clarsimp simp: and_mask_dvd_nat[where n=cacheLineBits, simplified]) + apply (clarsimp simp: lineStart_def shiftr_shiftl1) apply (subst mask_eqs(8)[symmetric]) apply (clarsimp simp: mask_AND_NOT_mask) - apply (clarsimp simp: and_mask_dvd_nat[where n=5, simplified]) + apply (clarsimp simp: and_mask_dvd_nat[where n=cacheLineBits, simplified]) apply (subst mask_eqs(8)[symmetric]) apply (clarsimp simp: lineStart_mask) - apply (subgoal_tac "w3 + (w2 - w1) && mask 5 = w2 && mask 5") + apply (subgoal_tac "w3 + (w2 - w1) && mask cacheLineBits = w2 && mask cacheLineBits") apply clarsimp apply (rule_tac x=w1 and y=w3 in linorder_le_cases) apply (subgoal_tac "lineStart (w3 + (w2 - w1)) - lineStart w2 = lineStart w3 - lineStart w1") @@ -332,31 +332,35 @@ lemma lineIndex_def2: lemma lineIndex_le_mono: "x \ y \ lineIndex x \ lineIndex y" - by (clarsimp simp: lineIndex_def2 cacheLineBits_def le_shiftr) + by (clarsimp simp: lineIndex_def2 le_shiftr) lemma lineIndex_lineStart_diff: - "w1 \ w2 \ (unat (lineStart w2 - lineStart w1) div 32) = unat (lineIndex w2 - lineIndex w1)" - apply (subst shiftr_div_2n'[symmetric, where 
n=5, simplified]) + "w1 \ w2 \ + unat (lineStart w2 - lineStart w1) div (2^cacheLineBits) = unat (lineIndex w2 - lineIndex w1)" + apply (subst shiftr_div_2n'[symmetric, where n=cacheLineBits, simplified]) apply (drule lineStart_le_mono) apply (drule sub_right_shift[OF lineStart_mask lineStart_mask]) - apply (simp add: lineIndex_def cacheLineBits_def) + apply (simp add: lineIndex_def) done +lemma unat_cacheLine_machine_word[simp]: + "unat ((2::machine_word)^cacheLineBits) = 2^cacheLineBits" + by (rule unat_p2, rule cacheLineBits_le_machine_word) + lemma cacheRangeOp_ccorres: "\\x y. empty_fail (oper x y); \n. ccorres dc xfdc \ (\\index = lineIndex w1 + of_nat n\) hs - (doMachineOp (oper (lineStart w1 + of_nat n * 0x20) - (lineStart w3 + of_nat n * 0x20))) + (doMachineOp (oper (lineStart w1 + of_nat n * (2^cacheLineBits)) + (lineStart w3 + of_nat n * (2^cacheLineBits)))) f; \s. \\\<^bsub>/UNIV\<^esub> {s} f ({t. index_' t = index_' s}) \ \ ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5) - (\\index = w1 >> 5\) hs + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) + (\\index = w1 >> cacheLineBits\) hs (doMachineOp (cacheRangeOp oper w1 w2 w3)) - (While \\index < (w2 >> 5) + 1\ + (While \\index < (w2 >> cacheLineBits) + 1\ (f;; \index :== \index + 1))" - apply (clarsimp simp: cacheRangeOp_def doMachineOp_mapM_x split_def - cacheLine_def cacheLineBits_def) + apply (clarsimp simp: cacheRangeOp_def doMachineOp_mapM_x split_def cacheLine_def) apply (rule ccorres_gen_asm[where G=\, simplified]) apply (rule ccorres_guard_imp) apply (rule ccorres_rel_imp) @@ -370,7 +374,7 @@ lemma cacheRangeOp_ccorres: apply (subst min_absorb1[OF order_eq_refl]) apply (erule (2) cachRangeOp_corres_helper) apply (simp add: lineIndex_lineStart_diff) - apply (simp add: lineIndex_def2 cacheLineBits_def) + apply (simp add: lineIndex_def2) apply unat_arith apply wp apply (clarsimp simp: length_upto_enum_step lineStart_le_mono unat_div) @@ -379,37 +383,39 @@ lemma cacheRangeOp_ccorres: apply (simp add: lineIndex_lineStart_diff unat_sub[OF lineIndex_le_mono]) apply (subst le_add_diff_inverse) apply (simp add: lineIndex_le_mono word_le_nat_alt[symmetric]) - apply (simp add: lineIndex_def2 cacheLineBits_def) - apply (rule unat_mono[where 'a=32 and b="0xFFFFFFFF", simplified]) - apply word_bitwise - apply (simp add: lineIndex_def cacheLineBits_def lineStart_def) + apply (simp add: lineIndex_def2) + apply (rule less_le_trans) + apply (rule unat_mono[where 'a=machine_word_len and b="mask word_bits"]) + apply (rule shiftr_cacheLineBits_less_mask_word_bits) + apply (simp add: mask_def word_bits_def unat_max_word) + apply (simp add: lineIndex_def lineStart_def) done - lemma lineStart_eq_minus_mask: - "lineStart w1 = w1 - (w1 && mask 5)" - by (simp add: lineStart_def cacheLineBits_def mask_out_sub_mask[symmetric] and_not_mask) + "lineStart w1 = w1 - (w1 && mask cacheLineBits)" + by (simp add: lineStart_def mask_out_sub_mask[symmetric] and_not_mask) lemma lineStart_idem[simp]: "lineStart (lineStart x) = lineStart x" - by (simp add: lineStart_def cacheLineBits_def) - + by (simp add: lineStart_def) lemma cache_range_lineIndex_helper: - "lineIndex w1 + of_nat n << 5 = w1 - (w1 && mask 5) + of_nat n * 0x20" - apply (clarsimp simp: lineIndex_def cacheLineBits_def word_shiftl_add_distrib lineStart_def[symmetric, unfolded cacheLineBits_def] lineStart_eq_minus_mask[symmetric]) + "lineIndex w1 + of_nat n << cacheLineBits = + w1 - (w1 && mask cacheLineBits) + of_nat n * (2^cacheLineBits)" + apply 
(clarsimp simp: lineIndex_def word_shiftl_add_distrib lineStart_def[symmetric] + lineStart_eq_minus_mask[symmetric]) apply (simp add: shiftl_t2n) done - lemma cleanCacheRange_PoC_ccorres: "ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_PoC w1 w2 w3)) (Call cleanCacheRange_PoC_'proc)" apply (rule ccorres_gen_asm[where G=\, simplified]) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def) apply (ccorres_remove_UNIV_guard) apply csymbr @@ -420,22 +426,24 @@ lemma cleanCacheRange_PoC_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: cleanByVA_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanByVA_modifies) apply clarsimp done lemma cleanInvalidateCacheRange_RAM_ccorres: - "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) - and (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5 \ unat (w2 - w2) \ gsMaxObjectSize s)) - (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + "ccorres dc xfdc + ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and + (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) \ + w1 && mask cacheLineBits = w3 && mask cacheLineBits \ + unat (w2 - w2) \ gsMaxObjectSize s)) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanInvalidateCacheRange_RAM w1 w2 w3)) (Call cleanInvalidateCacheRange_RAM_'proc)" apply (rule ccorres_gen_asm) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def) apply (ccorres_remove_UNIV_guard) apply (rule ccorres_Guard_Seq) @@ -455,9 +463,8 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: cleanInvalByVA_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanInvalByVA_modifies) apply (rule ceqv_refl) apply (ctac (no_vcg) add: dsb_ccorres) @@ -468,7 +475,7 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: lemma cleanCacheRange_RAM_ccorres: "ccorres dc xfdc (\s. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5 + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits \ unat (w2 - w1) \ gsMaxObjectSize s) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_RAM w1 w2 w3)) @@ -493,12 +500,13 @@ lemma cleanCacheRange_RAM_ccorres: lemma cleanCacheRange_PoU_ccorres: "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and (\_. 
w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5)) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits)) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_PoU w1 w2 w3)) (Call cleanCacheRange_PoU_'proc)" apply (rule ccorres_gen_asm) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def) apply (ccorres_remove_UNIV_guard) apply (rule ccorres_Guard_Seq) @@ -512,9 +520,8 @@ lemma cleanCacheRange_PoU_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: cleanByVA_PoU_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanByVA_PoU_modifies) apply clarsimp apply (frule(1) ghost_assertion_size_logic) @@ -528,12 +535,13 @@ lemma dmo_if: lemma invalidateCacheRange_RAM_ccorres: "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5)) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits)) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (invalidateCacheRange_RAM w1 w2 w3)) (Call invalidateCacheRange_RAM_'proc)" apply (rule ccorres_gen_asm) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def split del: if_split) apply (ccorres_remove_UNIV_guard) apply (simp add: invalidateCacheRange_RAM_def doMachineOp_bind when_def @@ -542,19 +550,18 @@ lemma invalidateCacheRange_RAM_ccorres: split del: if_split) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) - apply (clarsimp simp: lineStart_def cacheLineBits_def) + apply (clarsimp simp: lineStart_def) apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) apply (rule ccorres_return_Skip) apply ceqv apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) - apply (clarsimp simp: lineStart_def cacheLineBits_def) + apply (clarsimp simp: lineStart_def) apply csymbr apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) apply (rule ccorres_return_Skip) apply ceqv - apply (rule_tac P="\s. unat (w2 - w1) \ gsMaxObjectSize s" - in ccorres_cross_over_guard) + apply (rule_tac P="\s. 
unat (w2 - w1) \ gsMaxObjectSize s" in ccorres_cross_over_guard) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) apply (ctac add: invalidateL2Range_ccorres) @@ -569,9 +576,8 @@ lemma invalidateCacheRange_RAM_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: invalidateByVA_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_modifies) apply ceqv apply (ctac add: dsb_ccorres) @@ -583,7 +589,7 @@ lemma invalidateCacheRange_RAM_ccorres: apply (simp add: guard_is_UNIV_def) apply (auto dest: ghost_assertion_size_logic simp: o_def)[1] apply (wp | clarsimp split: if_split)+ - apply (clarsimp simp: lineStart_def cacheLineBits_def guard_is_UNIV_def) + apply (clarsimp simp: lineStart_def guard_is_UNIV_def) apply (clarsimp simp: lineStart_mask) apply (subst mask_eqs(7)[symmetric]) apply (subst mask_eqs(8)[symmetric]) @@ -592,13 +598,14 @@ lemma invalidateCacheRange_RAM_ccorres: lemma invalidateCacheRange_I_ccorres: "ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (invalidateCacheRange_I w1 w2 w3)) (Call invalidateCacheRange_I_'proc)" apply (rule ccorres_gen_asm[where G=\, simplified]) apply (cinit' lift: start_' end_' pstart_') apply (clarsimp simp: word_sle_def whileAnno_def) + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (ccorres_remove_UNIV_guard) apply (simp add: invalidateCacheRange_I_def) apply csymbr @@ -609,21 +616,21 @@ lemma invalidateCacheRange_I_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: invalidateByVA_I_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_I_modifies) apply clarsimp done lemma branchFlushRange_ccorres: "ccorres dc xfdc (\_. 
w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 5 = w3 && mask 5) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (branchFlushRange w1 w2 w3)) (Call branchFlushRange_'proc)" apply (rule ccorres_gen_asm[where G=\, simplified]) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def) apply (ccorres_remove_UNIV_guard) apply (simp add: branchFlushRange_def) @@ -635,9 +642,8 @@ lemma branchFlushRange_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: branchFlush_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=branchFlush_modifies) apply clarsimp done diff --git a/proof/crefine/ARM/Recycle_C.thy b/proof/crefine/ARM/Recycle_C.thy index 79d6f27ec5..df3ff05be3 100644 --- a/proof/crefine/ARM/Recycle_C.thy +++ b/proof/crefine/ARM/Recycle_C.thy @@ -444,7 +444,8 @@ lemma clearMemory_PT_setObject_PTE_ccorres: apply (clarsimp simp: ptBits_def pageBits_def pteBits_def) apply (frule is_aligned_addrFromPPtr_n, simp) apply (clarsimp simp: is_aligned_no_overflow'[where n=10, simplified] pageBits_def - field_simps is_aligned_mask[symmetric] mask_AND_less_0) + field_simps is_aligned_mask[symmetric] mask_AND_less_0 + cacheLineBits_le_ptBits[unfolded ptBits_def pteBits_def, simplified]) done lemma modify_gets_helper: diff --git a/proof/crefine/ARM/Retype_C.thy b/proof/crefine/ARM/Retype_C.thy index b0b5fd33ec..45dacb619c 100644 --- a/proof/crefine/ARM/Retype_C.thy +++ b/proof/crefine/ARM/Retype_C.thy @@ -4745,7 +4745,9 @@ proof - apply (clarsimp simp: pageBits_def pdeBits_def valid_arch_state'_def page_directory_at'_def pdBits_def) apply (clarsimp simp: is_aligned_no_overflow'[where n=14, simplified] pdeBits_def - field_simps is_aligned_mask[symmetric] mask_AND_less_0)+ + field_simps is_aligned_mask[symmetric] mask_AND_less_0 + cacheLineBits_le_PageDirectoryObject_sz[unfolded APIType_capBits_def, + simplified])+ done qed diff --git a/proof/crefine/ARM/VSpace_C.thy b/proof/crefine/ARM/VSpace_C.thy index da66e3ee3e..407b485ab9 100644 --- a/proof/crefine/ARM/VSpace_C.thy +++ b/proof/crefine/ARM/VSpace_C.thy @@ -1563,8 +1563,10 @@ definition | ARM_H.flush_type.Unify \ (label = Kernel_C.ARMPageUnify_Instruction \ label = Kernel_C.ARMPDUnify_Instruction)" lemma doFlush_ccorres: - "ccorres dc xfdc (\s. vs \ ve \ ps \ ps + (ve - vs) \ vs && mask 5 = ps && mask 5 - \ unat (ve - vs) \ gsMaxObjectSize s) + "ccorres dc xfdc + (\s. vs \ ve \ ps \ ps + (ve - vs) \ + vs && mask cacheLineBits = ps && mask cacheLineBits \ + unat (ve - vs) \ gsMaxObjectSize s) (\flushtype_relation t \invLabel___int\ \ \\start = vs\ \ \\end = ve\ \ \\pstart = ps\) [] (doMachineOp (doFlush t vs ve ps)) (Call doFlush_'proc)" apply (cinit' lift: pstart_') @@ -1626,7 +1628,7 @@ context kernel_m begin lemma performPageFlush_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and K (asid \ mask asid_bits) - and (\s. ps \ ps + (ve - vs) \ vs && mask 5 = ps && mask 5 + and (\s. 
ps \ ps + (ve - vs) \ vs && mask cacheLineBits = ps && mask cacheLineBits \ unat (ve - vs) \ gsMaxObjectSize s)) (\\pd = Ptr pd\ \ \\asid = asid\ \ \\start = vs\ \ \\end = ve\ \ \\pstart = ps\ \ \flushtype_relation typ \invLabel___int \) @@ -1757,7 +1759,7 @@ lemma setMessageInfo_ccorres: lemma performPageDirectoryInvocationFlush_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and K (asid \ mask asid_bits) - and (\s. ps \ ps + (ve - vs) \ vs && mask 5 = ps && mask 5 + and (\s. ps \ ps + (ve - vs) \ vs && mask cacheLineBits = ps && mask cacheLineBits \ unat (ve - vs) \ gsMaxObjectSize s)) (\\pd = Ptr pd\ \ \\asid = asid\ \ \\start = vs\ \ \\end = ve\ \ \\pstart = ps\ \ \flushtype_relation typ \invLabel___int \) @@ -1948,18 +1950,13 @@ lemma ccorres_return_void_C': done lemma is_aligned_cache_preconds: - "\is_aligned rva n; n \ 6\ \ rva \ rva + 0x3F \ - addrFromPPtr rva \ addrFromPPtr rva + 0x3F \ rva && mask 5 = addrFromPPtr rva && mask 5" + "\ is_aligned rva n; n \ 6 \ \ rva \ rva + 0x3F \ addrFromPPtr rva \ addrFromPPtr rva + 0x3F" supply if_cong[cong] apply (drule is_aligned_weaken, simp) apply (rule conjI) apply (drule is_aligned_no_overflow, simp, unat_arith)[1] - apply (rule conjI) - apply (drule is_aligned_addrFromPPtr_n, simp) - apply (drule is_aligned_no_overflow, unat_arith) - apply (frule is_aligned_addrFromPPtr_n, simp) - apply (drule_tac x=6 and y=5 in is_aligned_weaken, simp)+ - apply (simp add: is_aligned_mask) + apply (drule is_aligned_addrFromPPtr_n, simp) + apply (drule is_aligned_no_overflow, unat_arith) done lemma pte_pte_invalid_new_spec: @@ -2219,7 +2216,7 @@ lemma unmapPage_ccorres: subgoal by (simp add: upto_enum_step_def upto_enum_word take_bit_Suc hd_map last_map typ_at_to_obj_at_arches field_simps objBits_simps archObjSize_def, - clarsimp dest!: is_aligned_cache_preconds) + drule is_aligned_cache_preconds; clarsimp) apply (simp add: upto_enum_step_def upto_enum_word) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp add: hd_map last_map upto_enum_step_def objBits_simps archObjSize_def diff --git a/proof/crefine/ARM/Wellformed_C.thy b/proof/crefine/ARM/Wellformed_C.thy index e2c6c7805d..caa878d9bf 100644 --- a/proof/crefine/ARM/Wellformed_C.thy +++ b/proof/crefine/ARM/Wellformed_C.thy @@ -495,6 +495,63 @@ lemma ucast_irq_array_guard[unfolded irq_array_size_val, simplified]: done +text \cacheLineBits interface\ + +(* only use this inside cache op functions; see Arch_Kernel_Config_Lemmas.cacheLineBits_sanity *) +lemmas cacheLineBits_val = + cacheLineBits_def[unfolded Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def] + +lemma cacheLineBits_le_ptBits: + "cacheLineBits \ ptBits" + using cacheLineBits_sanity + by (simp add: ptBits_def pteBits_def) + +lemma ptBits_leq_pageBits: + "ptBits \ pageBits" + by (simp add: ptBits_def pageBits_def pteBits_def) + +lemma ptBits_leq_pdBits: + "ptBits \ pdBits" + by (simp add: ptBits_def pdBits_def pteBits_def) + +lemma cacheLineBits_leq_pageBits: + "cacheLineBits \ pageBits" + using ptBits_leq_pageBits cacheLineBits_le_ptBits + by simp + +lemma cacheLineBits_leq_pdBits: + "cacheLineBits \ pdBits" + using ptBits_leq_pdBits cacheLineBits_le_ptBits + by simp + +lemma cacheLineBits_le_machine_word: + "cacheLineBits < LENGTH(machine_word_len)" + using pt_bits_stuff cacheLineBits_le_ptBits + by (simp add: word_bits_def) + +lemma APIType_capBits_PageDirectoryObject_pdBits: + "APIType_capBits PageDirectoryObject us = pdBits" + by (simp add: pdBits_def APIType_capBits_def 
pdeBits_def) + +lemma cacheLineBits_le_PageDirectoryObject_sz: + "cacheLineBits \ APIType_capBits PageDirectoryObject us" + by (simp add: APIType_capBits_PageDirectoryObject_pdBits cacheLineBits_leq_pdBits) + +lemma cacheLineBits_leq_pbfs: + "cacheLineBits \ pageBitsForSize sz" + by (rule order.trans, rule cacheLineBits_leq_pageBits, rule pbfs_atleast_pageBits) + +lemma addrFromPPtr_mask_cacheLineBits[simp]: + "addrFromPPtr ptr && mask cacheLineBits = ptr && mask cacheLineBits" + by (rule addrFromPPtr_mask_ARMSuperSection, rule cacheLineBits_leq_pbfs) + +lemma shiftr_cacheLineBits_less_mask_word_bits: + "x >> cacheLineBits < mask word_bits" for x :: machine_word + using shiftr_less_max_mask[where n=cacheLineBits and x=x] cacheLineBits_sanity + by (simp add: word_bits_def) + +(* end of Kernel_Config interface section *) + abbreviation(input) NotificationObject :: sword32 where diff --git a/proof/crefine/ARM_HYP/ArchMove_C.thy b/proof/crefine/ARM_HYP/ArchMove_C.thy index 9d898f404a..31054c9437 100644 --- a/proof/crefine/ARM_HYP/ArchMove_C.thy +++ b/proof/crefine/ARM_HYP/ArchMove_C.thy @@ -617,17 +617,6 @@ crunch insertNewCap, Arch_createNewCaps, threadSet, Arch.createObject, setThread simp: unless_def updateObject_default_def crunch_simps ignore_del: preemptionPoint) -(* this could be done as - lemmas addrFromPPtr_mask_6 = addrFromPPtr_mask[where n=6, simplified] - but that wouldn't give a sanity check of the n \ ... assumption disappearing *) -lemma addrFromPPtr_mask_6: - "addrFromPPtr ptr && mask 6 = ptr && mask 6" - by (rule addrFromPPtr_mask[where n=6, simplified]) - -lemma ptrFromPAddr_mask_6: - "ptrFromPAddr ps && mask 6 = ps && mask 6" - by (rule ptrFromPAddr_mask[where n=6, simplified]) - end end diff --git a/proof/crefine/ARM_HYP/Arch_C.thy b/proof/crefine/ARM_HYP/Arch_C.thy index 78f7debf44..0ead82e81a 100644 --- a/proof/crefine/ARM_HYP/Arch_C.thy +++ b/proof/crefine/ARM_HYP/Arch_C.thy @@ -1742,8 +1742,7 @@ lemma performPageInvocationMapPTE_ccorres: apply (subst is_aligned_no_wrap', assumption, fastforce simp: field_simps) apply (subst add_diff_eq [symmetric], subst is_aligned_no_wrap', assumption, fastforce simp: field_simps) apply simp - apply (clarsimp simp: pte_range_relation_def ptr_add_def ptr_range_to_list_def - addrFromPPtr_mask_6) + apply (clarsimp simp: pte_range_relation_def ptr_add_def ptr_range_to_list_def) apply (auto simp: valid_pte_slots'2_def upt_conv_Cons[where i=0])[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem hd_conv_nth last_conv_nth ucast_minus) apply (clarsimp simp: pte_range_relation_def ptr_range_to_list_def objBits_simps @@ -2122,8 +2121,7 @@ lemma performPageInvocationMapPDE_ccorres: apply (subst is_aligned_no_wrap', assumption, fastforce simp: field_simps) apply (subst add_diff_eq [symmetric], subst is_aligned_no_wrap', assumption, fastforce simp: field_simps) apply simp - apply (clarsimp simp: pde_range_relation_def ptr_add_def ptr_range_to_list_def - addrFromPPtr_mask_6) + apply (clarsimp simp: pde_range_relation_def ptr_add_def ptr_range_to_list_def) apply (auto simp: valid_pde_slots'2_def upt_conv_Cons[where i=0])[1] apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem hd_conv_nth last_conv_nth ucast_minus) apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def objBits_simps @@ -3149,7 +3147,7 @@ lemma decodeARMFrameInvocation_ccorres: intro conjI, (solves \frule vmsz_aligned_addrFromPPtr(3)[THEN iffD2], (subst mask_add_aligned mask_add_aligned_right, erule is_aligned_weaken, - rule order_trans[OF _ 
pbfs_atleast_pageBits[simplified pageBits_def]], simp)+, + rule cacheLineBits_leq_pbfs)+, simp\), fastforce simp add: ptrFromPAddr_add_left is_aligned_no_overflow3[rotated -1])+ apply (local_method simplify_and_expand)+ @@ -3513,15 +3511,14 @@ lemma decodeARMPageDirectoryInvocation_ccorres: \ \cache flush constraints\ subgoal for _ _ _ _ _ _ sz p - using pbfs_atleast_pageBits[simplified pageBits_def, of sz] + using pbfs_atleast_pageBits[of sz] cacheLineBits_leq_pageBits apply (intro conjI) apply (erule flush_range_le) apply (simp add:linorder_not_le) apply (erule word_less_sub_1) apply (simp add:mask_add_aligned mask_twice) apply (fastforce simp: mask_twice - mask_add_aligned[OF is_aligned_pageBitsForSize_minimum, - simplified pageBits_def]) + mask_add_aligned[OF is_aligned_pageBitsForSize_minimum]) apply (simp add: ptrFromPAddr_add_left) apply (erule flush_range_le) apply (simp add:linorder_not_le) diff --git a/proof/crefine/ARM_HYP/Invoke_C.thy b/proof/crefine/ARM_HYP/Invoke_C.thy index 84432c6231..0811a0756d 100644 --- a/proof/crefine/ARM_HYP/Invoke_C.thy +++ b/proof/crefine/ARM_HYP/Invoke_C.thy @@ -1715,7 +1715,7 @@ lemma clearMemory_untyped_ccorres: word_of_nat_less Kernel_Config.resetChunkBits_def word_bits_def unat_2p_sub_1) apply (strengthen is_aligned_no_wrap'[where sz=sz] is_aligned_addrFromPPtr_n)+ - apply (simp add: addrFromPPtr_mask) + apply simp apply (cases "ptr = 0") apply (drule subsetD, rule intvl_self, simp) apply (simp split: if_split_asm) diff --git a/proof/crefine/ARM_HYP/Machine_C.thy b/proof/crefine/ARM_HYP/Machine_C.thy index 2ad8e55adf..81a08b6acf 100644 --- a/proof/crefine/ARM_HYP/Machine_C.thy +++ b/proof/crefine/ARM_HYP/Machine_C.thy @@ -402,39 +402,38 @@ lemma index_xf_for_sequence: (* FIXME CLEANUP on all arches: this entire cache (section) has: - a number of useful word lemmas that can go into WordLib - - a ton of hardcoded "mask 6" and "64", which on sabre is "mask 5" and "32" respectively. - The proofs themselves are extremely similar. This can be much more generic! 
*) lemma lineStart_le_mono: "x \ y \ lineStart x \ lineStart y" - by (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 neg_mask_mono_le) + by (clarsimp simp: lineStart_def shiftr_shiftl1 neg_mask_mono_le) lemma lineStart_sub: - "\ x && mask 6 = y && mask 6\ \ lineStart (x - y) = lineStart x - lineStart y" - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1) + "\ x && mask cacheLineBits = y && mask cacheLineBits\ \ lineStart (x - y) = lineStart x - lineStart y" + apply (clarsimp simp: lineStart_def shiftr_shiftl1) apply (clarsimp simp: mask_out_sub_mask) apply (clarsimp simp: mask_eqs(8)[symmetric]) done lemma lineStart_mask: - "lineStart x && mask 6 = 0" - by (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_AND_NOT_mask) + "lineStart x && mask cacheLineBits = 0" + by (clarsimp simp: lineStart_def shiftr_shiftl1 mask_AND_NOT_mask) lemma cachRangeOp_corres_helper: - "\w1 \ w2; w3 \ w3 + (w2 - w1); w1 && mask 6 = w3 && mask 6\ - \ unat (lineStart w2 - lineStart w1) div 64 = - unat (lineStart (w3 + (w2 - w1)) - lineStart w3) div 64" + "\w1 \ w2; w3 \ w3 + (w2 - w1); w1 && mask cacheLineBits = w3 && mask cacheLineBits\ + \ unat (lineStart w2 - lineStart w1) div (2^cacheLineBits) = + unat (lineStart (w3 + (w2 - w1)) - lineStart w3) div (2^cacheLineBits)" apply (subst dvd_div_div_eq_mult, simp) - apply (clarsimp simp: and_mask_dvd_nat[where n=6, simplified]) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1) + apply (clarsimp simp: and_mask_dvd_nat[where n=cacheLineBits, simplified]) + apply (clarsimp simp: lineStart_def shiftr_shiftl1) apply (subst mask_eqs(8)[symmetric]) apply (clarsimp simp: mask_AND_NOT_mask) - apply (clarsimp simp: and_mask_dvd_nat[where n=6, simplified]) + apply (clarsimp simp: and_mask_dvd_nat[where n=cacheLineBits, simplified]) apply (subst mask_eqs(8)[symmetric]) apply (clarsimp simp: lineStart_mask) - apply (subgoal_tac "w3 + (w2 - w1) && mask 6 = w2 && mask 6") + apply (subgoal_tac "w3 + (w2 - w1) && mask cacheLineBits = w2 && mask cacheLineBits") apply clarsimp apply (rule_tac x=w1 and y=w3 in linorder_le_cases) apply (subgoal_tac "lineStart (w3 + (w2 - w1)) - lineStart w2 = lineStart w3 - lineStart w1") @@ -482,31 +481,35 @@ lemma lineIndex_def2: lemma lineIndex_le_mono: "x \ y \ lineIndex x \ lineIndex y" - by (clarsimp simp: lineIndex_def2 cacheLineBits_def le_shiftr) + by (clarsimp simp: lineIndex_def2 le_shiftr) lemma lineIndex_lineStart_diff: - "w1 \ w2 \ (unat (lineStart w2 - lineStart w1) div 64) = unat (lineIndex w2 - lineIndex w1)" - apply (subst shiftr_div_2n'[symmetric, where n=6, simplified]) + "w1 \ w2 \ + unat (lineStart w2 - lineStart w1) div (2^cacheLineBits) = unat (lineIndex w2 - lineIndex w1)" + apply (subst shiftr_div_2n'[symmetric, where n=cacheLineBits, simplified]) apply (drule lineStart_le_mono) apply (drule sub_right_shift[OF lineStart_mask lineStart_mask]) - apply (simp add: lineIndex_def cacheLineBits_def) + apply (simp add: lineIndex_def) done +lemma unat_cacheLine_machine_word[simp]: + "unat ((2::machine_word)^cacheLineBits) = 2^cacheLineBits" + by (rule unat_p2, rule cacheLineBits_le_machine_word) + lemma cacheRangeOp_ccorres: "\\x y. empty_fail (oper x y); \n. ccorres dc xfdc \ (\\index = lineIndex w1 + of_nat n\) hs - (doMachineOp (oper (lineStart w1 + of_nat n * 0x40) - (lineStart w3 + of_nat n * 0x40))) + (doMachineOp (oper (lineStart w1 + of_nat n * (2^cacheLineBits)) + (lineStart w3 + of_nat n * (2^cacheLineBits)))) f; \s. \\\<^bsub>/UNIV\<^esub> {s} f ({t. 
index_' t = index_' s}) \ \ ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6) - (\\index = w1 >> 6\) hs + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) + (\\index = w1 >> cacheLineBits\) hs (doMachineOp (cacheRangeOp oper w1 w2 w3)) - (While \\index < (w2 >> 6) + 1\ + (While \\index < (w2 >> cacheLineBits) + 1\ (f;; \index :== \index + 1))" - apply (clarsimp simp: cacheRangeOp_def doMachineOp_mapM_x split_def - cacheLine_def cacheLineBits_def) + apply (clarsimp simp: cacheRangeOp_def doMachineOp_mapM_x split_def cacheLine_def) apply (rule ccorres_gen_asm[where G=\, simplified]) apply (rule ccorres_guard_imp) apply (rule ccorres_rel_imp) @@ -520,7 +523,7 @@ lemma cacheRangeOp_ccorres: apply (subst min_absorb1[OF order_eq_refl]) apply (erule (2) cachRangeOp_corres_helper) apply (simp add: lineIndex_lineStart_diff) - apply (simp add: lineIndex_def2 cacheLineBits_def) + apply (simp add: lineIndex_def2) apply unat_arith apply wp apply (clarsimp simp: length_upto_enum_step lineStart_le_mono unat_div) @@ -529,36 +532,41 @@ lemma cacheRangeOp_ccorres: apply (simp add: lineIndex_lineStart_diff unat_sub[OF lineIndex_le_mono]) apply (subst le_add_diff_inverse) apply (simp add: lineIndex_le_mono word_le_nat_alt[symmetric]) - apply (simp add: lineIndex_def2 cacheLineBits_def) - apply (rule unat_mono[where 'a=32 and b="0xFFFFFFFF", simplified]) - apply word_bitwise - apply (simp add: lineIndex_def cacheLineBits_def lineStart_def) + apply (simp add: lineIndex_def2) + apply (rule less_le_trans) + apply (rule unat_mono[where 'a=machine_word_len and b="mask word_bits"]) + apply (rule shiftr_cacheLineBits_less_mask_word_bits) + apply (simp add: mask_def word_bits_def unat_max_word) + apply (simp add: lineIndex_def lineStart_def) done lemma lineStart_eq_minus_mask: - "lineStart w1 = w1 - (w1 && mask 6)" - by (simp add: lineStart_def cacheLineBits_def mask_out_sub_mask[symmetric] and_not_mask) + "lineStart w1 = w1 - (w1 && mask cacheLineBits)" + by (simp add: lineStart_def mask_out_sub_mask[symmetric] and_not_mask) lemma lineStart_idem[simp]: "lineStart (lineStart x) = lineStart x" - by (simp add: lineStart_def cacheLineBits_def) + by (simp add: lineStart_def) lemma cache_range_lineIndex_helper: - "lineIndex w1 + of_nat n << 6 = w1 - (w1 && mask 6) + of_nat n * 0x40" - apply (clarsimp simp: lineIndex_def cacheLineBits_def word_shiftl_add_distrib lineStart_def[symmetric, unfolded cacheLineBits_def] lineStart_eq_minus_mask[symmetric]) + "lineIndex w1 + of_nat n << cacheLineBits = + w1 - (w1 && mask cacheLineBits) + of_nat n * (2^cacheLineBits)" + apply (clarsimp simp: lineIndex_def word_shiftl_add_distrib lineStart_def[symmetric] + lineStart_eq_minus_mask[symmetric]) apply (simp add: shiftl_t2n) done lemma cleanCacheRange_PoC_ccorres: "ccorres dc xfdc (\_. 
w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_PoC w1 w2 w3)) (Call cleanCacheRange_PoC_'proc)" apply (rule ccorres_gen_asm[where G=\, simplified]) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def) apply csymbr apply (rule cacheRangeOp_ccorres) @@ -568,22 +576,24 @@ lemma cleanCacheRange_PoC_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: cleanByVA_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanByVA_modifies) apply clarsimp done lemma cleanInvalidateCacheRange_RAM_ccorres: - "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) - and (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6 \ unat (w2 - w2) \ gsMaxObjectSize s)) - (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] + "ccorres dc xfdc + ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and + (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) \ + w1 && mask cacheLineBits = w3 && mask cacheLineBits \ + unat (w2 - w2) \ gsMaxObjectSize s)) + (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanInvalidateCacheRange_RAM w1 w2 w3)) (Call cleanInvalidateCacheRange_RAM_'proc)" apply (rule ccorres_gen_asm) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop) @@ -602,9 +612,8 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: cleanInvalByVA_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanInvalByVA_modifies) apply (rule ceqv_refl) apply (ctac (no_vcg) add: dsb_ccorres) @@ -615,7 +624,7 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: lemma cleanCacheRange_RAM_ccorres: "ccorres dc xfdc (\s. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6 + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits \ unat (w2 - w1) \ gsMaxObjectSize s) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_RAM w1 w2 w3)) @@ -640,12 +649,13 @@ lemma cleanCacheRange_RAM_ccorres: lemma cleanCacheRange_PoU_ccorres: "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and (\_. 
w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6)) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits)) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (cleanCacheRange_PoU w1 w2 w3)) (Call cleanCacheRange_PoU_'proc)" apply (rule ccorres_gen_asm) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) @@ -658,9 +668,8 @@ lemma cleanCacheRange_PoU_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: cleanByVA_PoU_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanByVA_PoU_modifies) apply clarsimp apply (frule(1) ghost_assertion_size_logic) @@ -674,12 +683,13 @@ lemma dmo_if: lemma invalidateCacheRange_RAM_ccorres: "ccorres dc xfdc ((\s. unat (w2 - w1) \ gsMaxObjectSize s) and (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6)) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits)) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (invalidateCacheRange_RAM w1 w2 w3)) (Call invalidateCacheRange_RAM_'proc)" apply (rule ccorres_gen_asm) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def split del: if_split) apply (simp add: invalidateCacheRange_RAM_def doMachineOp_bind when_def empty_fail_invalidateL2Range empty_fail_invalidateByVA @@ -687,19 +697,18 @@ lemma invalidateCacheRange_RAM_ccorres: split del: if_split) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) - apply (clarsimp simp: lineStart_def cacheLineBits_def) + apply (clarsimp simp: lineStart_def) apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) apply (rule ccorres_return_Skip) apply ceqv apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) - apply (clarsimp simp: lineStart_def cacheLineBits_def) + apply (clarsimp simp: lineStart_def) apply csymbr apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) apply (rule ccorres_return_Skip) apply ceqv - apply (rule_tac P="\s. unat (w2 - w1) \ gsMaxObjectSize s" - in ccorres_cross_over_guard) + apply (rule_tac P="\s. 
unat (w2 - w1) \ gsMaxObjectSize s" in ccorres_cross_over_guard) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) apply (ctac add: plat_invalidateL2Range_ccorres) @@ -714,9 +723,8 @@ lemma invalidateCacheRange_RAM_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: invalidateByVA_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_modifies) apply ceqv apply (ctac add: dsb_ccorres) @@ -728,7 +736,7 @@ lemma invalidateCacheRange_RAM_ccorres: apply (simp add: guard_is_UNIV_def) apply (auto dest: ghost_assertion_size_logic simp: o_def)[1] apply (wp | clarsimp split: if_split)+ - apply (clarsimp simp: lineStart_def cacheLineBits_def guard_is_UNIV_def) + apply (clarsimp simp: lineStart_def guard_is_UNIV_def) apply (clarsimp simp: lineStart_mask) apply (subst mask_eqs(7)[symmetric]) apply (subst mask_eqs(8)[symmetric]) @@ -737,7 +745,7 @@ lemma invalidateCacheRange_RAM_ccorres: lemma invalidateCacheRange_I_ccorres: "ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (invalidateCacheRange_I w1 w2 w3)) (Call invalidateCacheRange_I_'proc)" @@ -750,12 +758,13 @@ lemma invalidateCacheRange_I_ccorres: lemma branchFlushRange_ccorres: "ccorres dc xfdc (\_. w1 \ w2 \ w3 \ w3 + (w2 - w1) - \ w1 && mask 6 = w3 && mask 6) + \ w1 && mask cacheLineBits = w3 && mask cacheLineBits) (\\start = w1\ \ \\end = w2\ \ \\pstart = w3\) [] (doMachineOp (branchFlushRange w1 w2 w3)) (Call branchFlushRange_'proc)" apply (rule ccorres_gen_asm[where G=\, simplified]) apply (cinit' lift: start_' end_' pstart_') + apply (fold cacheLineBits_val) (* make CACHE_LINE_SIZE_BITS value symbolic *) apply (clarsimp simp: word_sle_def whileAnno_def) apply (simp add: branchFlushRange_def) apply csymbr @@ -766,9 +775,8 @@ lemma branchFlushRange_ccorres: apply (rule ccorres_guard_imp2) apply csymbr apply (ctac add: branchFlush_ccorres) - apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 - mask_out_sub_mask) - apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) + apply (clarsimp simp: lineStart_def shiftr_shiftl1 mask_out_sub_mask) + apply (drule_tac s="w1 && mask cacheLineBits" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=branchFlush_modifies) apply clarsimp done diff --git a/proof/crefine/ARM_HYP/Retype_C.thy b/proof/crefine/ARM_HYP/Retype_C.thy index a6762ccc4e..b257c9843c 100644 --- a/proof/crefine/ARM_HYP/Retype_C.thy +++ b/proof/crefine/ARM_HYP/Retype_C.thy @@ -5887,7 +5887,8 @@ proof - apply (intro conjI, simp_all add: table_bits_defs)[1] apply fastforce apply ((clarsimp simp: is_aligned_no_overflow'[where n=14, simplified] - field_simps is_aligned_mask[symmetric] mask_AND_less_0)+)[3] + field_simps is_aligned_mask[symmetric] mask_AND_less_0 + cacheLineBits_le_ptBits[unfolded ptBits_def pteBits_def, simplified])+)[3] \ \VCPU\ apply (cinit' lift: t_' regionBase_' userSize_' deviceMemory_') apply (simp add: object_type_from_H_def Kernel_C_defs) diff --git a/proof/crefine/ARM_HYP/VSpace_C.thy b/proof/crefine/ARM_HYP/VSpace_C.thy index 
ef4d6dd55c..16a7b77a60 100644 --- a/proof/crefine/ARM_HYP/VSpace_C.thy +++ b/proof/crefine/ARM_HYP/VSpace_C.thy @@ -2668,10 +2668,12 @@ definition | ARM_HYP_H.flush_type.Unify \ (label = Kernel_C.ARMPageUnify_Instruction \ label = Kernel_C.ARMPDUnify_Instruction)" lemma doFlush_ccorres: - "ccorres dc xfdc (\s. vs \ ve \ ps \ ps + (ve - vs) \ vs && mask 6 = ps && mask 6 - \ \ahyp version translates ps into kernel virtual before flushing\ - \ ptrFromPAddr ps \ ptrFromPAddr ps + (ve - vs) - \ unat (ve - vs) \ gsMaxObjectSize s) + "ccorres dc xfdc + (\s. vs \ ve \ ps \ ps + (ve - vs) + \ vs && mask cacheLineBits = ps && mask cacheLineBits + \ \arm-hyp version translates ps into kernel virtual before flushing\ + \ ptrFromPAddr ps \ ptrFromPAddr ps + (ve - vs) + \ unat (ve - vs) \ gsMaxObjectSize s) (\flushtype_relation t \invLabel___int\ \ \\start = vs\ \ \\end = ve\ \ \\pstart = ps\) [] (doMachineOp (doFlush t vs ve ps)) (Call doFlush_'proc)" apply (cinit' lift: pstart_') @@ -2721,7 +2723,6 @@ lemma doFlush_ccorres: Kernel_C.ARMPageInvalidate_Data_def Kernel_C.ARMPDInvalidate_Data_def Kernel_C.ARMPageCleanInvalidate_Data_def Kernel_C.ARMPDCleanInvalidate_Data_def Kernel_C.ARMPageUnify_Instruction_def Kernel_C.ARMPDUnify_Instruction_def - ptrFromPAddr_mask_6 dest: ghost_assertion_size_logic[rotated] split: ARM_HYP_H.flush_type.splits) done @@ -2738,7 +2739,7 @@ context kernel_m begin lemma performPageFlush_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and K (asid \ mask asid_bits) - and (\s. ps \ ps + (ve - vs) \ vs && mask 6 = ps && mask 6 + and (\s. ps \ ps + (ve - vs) \ vs && mask cacheLineBits = ps && mask cacheLineBits \ ptrFromPAddr ps \ ptrFromPAddr ps + (ve - vs) \ unat (ve - vs) \ gsMaxObjectSize s)) (\\pd = Ptr pd\ \ \\asid = asid\ \ @@ -2911,7 +2912,7 @@ lemma setMessageInfo_ccorres: lemma performPageDirectoryInvocationFlush_ccorres: "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and K (asid \ mask asid_bits) - and (\s. ps \ ps + (ve - vs) \ vs && mask 6 = ps && mask 6 + and (\s. 
ps \ ps + (ve - vs) \ vs && mask cacheLineBits = ps && mask cacheLineBits \ ptrFromPAddr ps \ ptrFromPAddr ps + (ve - vs) \ unat (ve - vs) \ gsMaxObjectSize s)) (\\pd = Ptr pd\ \ \\asid = asid\ \ @@ -3129,18 +3130,13 @@ lemma ccorres_return_void_C': done lemma is_aligned_cache_preconds: - "\is_aligned rva n; n \ 7\ \ rva \ rva + 0x7F \ - addrFromPPtr rva \ addrFromPPtr rva + 0x7F \ rva && mask 6 = addrFromPPtr rva && mask 6" + "\is_aligned rva n; n \ 7\ \ rva \ rva + 0x7F \ addrFromPPtr rva \ addrFromPPtr rva + 0x7F" supply if_cong[cong] apply (drule is_aligned_weaken, simp) apply (rule conjI) apply (drule is_aligned_no_overflow, simp, unat_arith)[1] - apply (rule conjI) - apply (drule is_aligned_addrFromPPtr_n, simp) - apply (drule is_aligned_no_overflow, unat_arith) - apply (frule is_aligned_addrFromPPtr_n, simp) - apply (drule_tac x=7 and y=6 in is_aligned_weaken, simp)+ - apply (simp add: is_aligned_mask) + apply (drule is_aligned_addrFromPPtr_n, simp) + apply (drule is_aligned_no_overflow, unat_arith) done lemma pte_pte_invalid_new_spec: @@ -3386,7 +3382,7 @@ lemma unmapPage_ccorres: hd_map last_map typ_at_to_obj_at_arches field_simps objBits_simps archObjSize_def largePagePTEOffsets_def Let_def table_bits_defs, - clarsimp dest!: is_aligned_cache_preconds) + drule is_aligned_cache_preconds; clarsimp) apply (simp add: upto_enum_step_def upto_enum_word largePagePTEOffsets_def Let_def) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp add: hd_map last_map upto_enum_step_def objBits_simps archObjSize_def diff --git a/proof/crefine/ARM_HYP/Wellformed_C.thy b/proof/crefine/ARM_HYP/Wellformed_C.thy index c5dae8cd44..ec8096e374 100644 --- a/proof/crefine/ARM_HYP/Wellformed_C.thy +++ b/proof/crefine/ARM_HYP/Wellformed_C.thy @@ -529,6 +529,88 @@ lemma ucast_irq_array_guard[unfolded irq_array_size_val, simplified]: apply simp done + +text \cacheLineBits interface\ + +(* only use this inside cache op functions; see Arch_Kernel_Config_Lemmas.cacheLineBits_sanity *) +lemmas cacheLineBits_val = + cacheLineBits_def[unfolded Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def] + +lemma cacheLineBits_le_ptBits: + "cacheLineBits \ ptBits" + using cacheLineBits_sanity + by (simp add: pt_bits_def pte_bits_def) + +(* This lemma and ptBits_leq_pdBits are for use with cacheLineBits_le_ptBits *) +lemma ptBits_leq_pageBits: + "ptBits \ pageBits" + by (simp add: pt_bits_def pte_bits_def pageBits_def) + +lemma ptBits_leq_pdBits: + "ptBits \ pdBits" + by (simp add: pt_bits_def pd_bits_def pde_bits_def pte_bits_def) + +lemma cacheLineBits_leq_pageBits: + "cacheLineBits \ pageBits" + using ptBits_leq_pageBits cacheLineBits_le_ptBits + by simp + +lemma cacheLineBits_leq_pdBits: + "cacheLineBits \ pdBits" + using ptBits_leq_pdBits cacheLineBits_le_ptBits + by simp + +lemma cacheLineBits_le_machine_word: + "cacheLineBits < LENGTH(machine_word_len)" + apply (rule le_less_trans, rule cacheLineBits_le_ptBits) + by (simp add: pt_bits_def pte_bits_def) + +lemma APIType_capBits_PageDirectoryObject_pdBits: + "APIType_capBits PageDirectoryObject us = pdBits" + by (simp add: pd_bits_def APIType_capBits_def pde_bits_def) + +lemma cacheLineBits_le_PageDirectoryObject_sz: + "cacheLineBits \ APIType_capBits PageDirectoryObject us" + using APIType_capBits_PageDirectoryObject_pdBits cacheLineBits_leq_pdBits + by simp + +lemma cacheLineBits_leq_pbfs: + "cacheLineBits \ pageBitsForSize sz" + by (rule order.trans, rule cacheLineBits_leq_pageBits, rule pbfs_atleast_pageBits) + +lemma addrFromPPtr_mask_SuperSection: + 
"n \ pageBitsForSize ARMSuperSection + \ addrFromPPtr ptr && mask n = ptr && mask n" + apply (simp add: addrFromPPtr_def) + apply (prop_tac "pptrBaseOffset AND mask n = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(8)) + done + +lemma ptrFromPAddr_mask_SuperSection: + "n \ pageBitsForSize ARMSuperSection + \ ptrFromPAddr ptr && mask n = ptr && mask n" + apply (simp add: ptrFromPAddr_def) + apply (prop_tac "pptrBaseOffset AND mask n = 0") + apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) + apply (simp flip: mask_eqs(7)) + done + +lemma addrFromPPtr_mask_cacheLineBits[simp]: + "addrFromPPtr ptr && mask cacheLineBits = ptr && mask cacheLineBits" + by (rule addrFromPPtr_mask_SuperSection, rule cacheLineBits_leq_pbfs) + +lemma ptrFromPAddr_mask_cacheLineBits[simp]: + "ptrFromPAddr ptr && mask cacheLineBits = ptr && mask cacheLineBits" + by (rule ptrFromPAddr_mask_SuperSection, rule cacheLineBits_leq_pbfs) + +lemma shiftr_cacheLineBits_less_mask_word_bits: + "x >> cacheLineBits < mask word_bits" for x :: machine_word + using shiftr_less_max_mask[where n=cacheLineBits and x=x] cacheLineBits_sanity + by (simp add: word_bits_def) + +(* end of Kernel_Config interface section *) + (* Input abbreviations for API object types *) (* disambiguates names *) diff --git a/proof/invariant-abstract/ARM/ArchVSpace_AI.thy b/proof/invariant-abstract/ARM/ArchVSpace_AI.thy index 765c51f107..00ae764be0 100644 --- a/proof/invariant-abstract/ARM/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/ARM/ArchVSpace_AI.thy @@ -3409,17 +3409,13 @@ lemma unmap_page_table_unmapped2: lemma cacheRangeOp_lift[wp]: assumes o: "\a b. \P\ oper a b \\_. P\" shows "\P\ cacheRangeOp oper x y z \\_. P\" - apply (clarsimp simp: cacheRangeOp_def lineStart_def cacheLineBits_def cacheLine_def) - apply (rule hoare_pre) - apply (wp mapM_x_wp_inv o) - apply (case_tac x, simp, wp o, simp) - done + unfolding cacheRangeOp_def + by (wpsimp wp: mapM_x_wp_inv o) lemma cleanCacheRange_PoU_underlying_memory[wp]: - "\\m'. underlying_memory m' p = um\ cleanCacheRange_PoU a b c \\_ m'. underlying_memory m' p = um\" + "cleanCacheRange_PoU a b c \\m'. 
underlying_memory m' p = um\" by (clarsimp simp: cleanCacheRange_PoU_def, wp) - lemma unmap_page_table_unmapped3: "\pspace_aligned and valid_vspace_objs and page_table_at pt and K (ref = [VSRef (vaddr >> 20) (Some APageDirectory), diff --git a/proof/invariant-abstract/ARM/Machine_AI.thy b/proof/invariant-abstract/ARM/Machine_AI.thy index 9415eb5759..f058abfa14 100644 --- a/proof/invariant-abstract/ARM/Machine_AI.thy +++ b/proof/invariant-abstract/ARM/Machine_AI.thy @@ -270,9 +270,8 @@ lemma no_fail_invalidateCacheRange_I[simp, wp]: lemma no_fail_invalidateCacheRange_RAM[simp, wp]: "no_fail \ (invalidateCacheRange_RAM s e p)" - apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def) - apply (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb) - done + unfolding invalidateCacheRange_RAM_def + by (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb) lemma no_fail_branchFlushRange[simp, wp]: "no_fail \ (branchFlushRange s e p)" @@ -586,7 +585,7 @@ lemma no_irq_when: lemma no_irq_invalidateCacheRange_RAM[simp, wp]: "no_irq (invalidateCacheRange_RAM s e p)" - apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def) + apply (simp add: invalidateCacheRange_RAM_def) apply (wp no_irq_invalidateL2Range no_irq_invalidateByVA no_irq_dsb no_irq_when) done @@ -701,17 +700,10 @@ lemma empty_fail_flushBTAC: "empty_fail flushBTAC" lemma empty_fail_writeContextID: "empty_fail writeContextID" by (simp add: writeContextID_def) - - lemma empty_fail_cacheRangeOp [simp, intro!]: assumes ef: "\a b. empty_fail (oper a b)" shows "empty_fail (cacheRangeOp oper s e p)" - apply (simp add: cacheRangeOp_def mapM_x_mapM lineStart_def cacheLineBits_def cacheLine_def ef) - apply (rule empty_fail_bind) - apply (rule empty_fail_mapM) - apply (auto intro: ef) - done - + by (auto simp add: cacheRangeOp_def mapM_x_mapM intro: ef) lemma empty_fail_cleanCacheRange_PoU[simp, intro!]: "empty_fail (cleanCacheRange_PoU s e p)" @@ -736,7 +728,7 @@ lemma empty_fail_invalidateCacheRange_I[simp, intro!]: lemma empty_fail_invalidateCacheRange_RAM[simp, intro!]: "empty_fail (invalidateCacheRange_RAM s e p)" - by (fastforce simp: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def + by (fastforce simp: invalidateCacheRange_RAM_def empty_fail_invalidateL2Range empty_fail_invalidateByVA empty_fail_dsb) lemma empty_fail_branchFlushRange[simp, intro!]: diff --git a/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy index 175dcedbac..e44be22bd9 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchInvariants_AI.thy @@ -2624,24 +2624,6 @@ lemma is_aligned_ptrFromPAddrD[simplified pageBitsForSize_simps]: by (simp add: ptrFromPAddr_def) (erule is_aligned_addD2, erule is_aligned_weaken[OF pptrBaseOffset_aligned]) -lemma addrFromPPtr_mask[simplified ARM_HYP.pageBitsForSize_simps]: - "n \ pageBitsForSize ARMSuperSection - \ addrFromPPtr ptr && mask n = ptr && mask n" - apply (simp add: addrFromPPtr_def) - apply (prop_tac "pptrBaseOffset AND mask n = 0") - apply (rule mask_zero[OF is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) - apply (simp flip: mask_eqs(8)) - done - -lemma ptrFromPAddr_mask[simplified ARM_HYP.pageBitsForSize_simps]: - "n \ pageBitsForSize ARMSuperSection - \ ptrFromPAddr ptr && mask n = ptr && mask n" - apply (simp add: ptrFromPAddr_def) - apply (prop_tac "pptrBaseOffset AND mask n = 0") - apply (rule mask_zero[OF 
is_aligned_weaken[OF pptrBaseOffset_aligned]], simp) - apply (simp flip: mask_eqs(7)) - done - end declare ARM_HYP.arch_tcb_context_absorbs[simp] diff --git a/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy index 18cf5c470a..7566151c4d 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy @@ -4484,17 +4484,13 @@ lemma unmap_page_table_unmapped2: lemma cacheRangeOp_lift[wp]: assumes o: "\a b. \P\ oper a b \\_. P\" shows "\P\ cacheRangeOp oper x y z \\_. P\" - apply (clarsimp simp: cacheRangeOp_def lineStart_def cacheLineBits_def cacheLine_def) - apply (rule hoare_pre) - apply (wp mapM_x_wp_inv o) - apply (case_tac x, simp, wp o, simp) - done + unfolding cacheRangeOp_def + by (wpsimp wp: mapM_x_wp_inv o) lemma cleanCacheRange_PoU_underlying_memory[wp]: - "\\m'. underlying_memory m' p = um\ cleanCacheRange_PoU a b c \\_ m'. underlying_memory m' p = um\" + "cleanCacheRange_PoU a b c \\m'. underlying_memory m' p = um\" by (clarsimp simp: cleanCacheRange_PoU_def, wp) - lemma unmap_page_table_unmapped3: "\pspace_aligned and valid_vspace_objs and page_table_at pt and K (ref = [VSRef (vaddr >> 21) (Some APageDirectory), diff --git a/proof/invariant-abstract/ARM_HYP/Machine_AI.thy b/proof/invariant-abstract/ARM_HYP/Machine_AI.thy index 759dc4c9b9..16c70721cb 100644 --- a/proof/invariant-abstract/ARM_HYP/Machine_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/Machine_AI.thy @@ -279,9 +279,8 @@ lemma no_fail_invalidateCacheRange_I[simp, wp]: lemma no_fail_invalidateCacheRange_RAM[simp, wp]: "no_fail \ (invalidateCacheRange_RAM s e p)" - apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def) - apply (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb) - done + unfolding invalidateCacheRange_RAM_def + by (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb) lemma no_fail_branchFlushRange[simp, wp]: "no_fail \ (branchFlushRange s e p)" @@ -643,7 +642,7 @@ lemma no_irq_when: lemma no_irq_invalidateCacheRange_RAM[simp, wp]: "no_irq (invalidateCacheRange_RAM s e p)" - apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def) + apply (simp add: invalidateCacheRange_RAM_def) apply (wp no_irq_invalidateL2Range no_irq_invalidateByVA no_irq_dsb no_irq_when) done @@ -821,12 +820,7 @@ crunch readVCPUHardwareReg, writeVCPUHardwareReg, get_cntv_cval_64, set_cntv_cva lemma empty_fail_cacheRangeOp [simp, intro!]: assumes ef: "\a b. 
empty_fail (oper a b)" shows "empty_fail (cacheRangeOp oper s e p)" - apply (simp add: cacheRangeOp_def mapM_x_mapM lineStart_def cacheLineBits_def cacheLine_def ef) - apply (rule empty_fail_bind) - apply (rule empty_fail_mapM) - apply (auto intro: ef) - done - + by (auto simp add: cacheRangeOp_def mapM_x_mapM intro: ef) lemma empty_fail_cleanCacheRange_PoU[simp, intro!]: "empty_fail (cleanCacheRange_PoU s e p)" @@ -851,7 +845,7 @@ lemma empty_fail_invalidateCacheRange_I[simp, intro!]: lemma empty_fail_invalidateCacheRange_RAM[simp, intro!]: "empty_fail (invalidateCacheRange_RAM s e p)" - by (fastforce simp: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def + by (fastforce simp: invalidateCacheRange_RAM_def empty_fail_invalidateL2Range empty_fail_invalidateByVA empty_fail_dsb) lemma empty_fail_branchFlushRange[simp, intro!]: diff --git a/spec/haskell/src/SEL4/Machine/Hardware/ARM/Exynos4210.hs b/spec/haskell/src/SEL4/Machine/Hardware/ARM/Exynos4210.hs index ddb5547a72..3a73f7ce6a 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/ARM/Exynos4210.hs +++ b/spec/haskell/src/SEL4/Machine/Hardware/ARM/Exynos4210.hs @@ -164,9 +164,8 @@ cacheInvalidateL2RangeCallback _ _ _ = return () cacheCleanL2RangeCallback :: Ptr CallbackData -> PAddr -> PAddr -> IO () cacheCleanL2RangeCallback _ _ _ = return () --- For the ARM1136 cacheLine :: Int -cacheLine = 32 +cacheLine = error "see Kernel_Config.thy" cacheLineBits :: Int -cacheLineBits = 5 +cacheLineBits = error "see Kernel_Config.thy" diff --git a/spec/haskell/src/SEL4/Machine/Hardware/ARM/KZM.hs b/spec/haskell/src/SEL4/Machine/Hardware/ARM/KZM.hs index f53bc143fb..b4523ddbf3 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/ARM/KZM.hs +++ b/spec/haskell/src/SEL4/Machine/Hardware/ARM/KZM.hs @@ -161,9 +161,8 @@ cacheInvalidateL2RangeCallback _ _ _ = return () cacheCleanL2RangeCallback :: Ptr CallbackData -> PAddr -> PAddr -> IO () cacheCleanL2RangeCallback _ _ _ = return () --- For the ARM1136 cacheLine :: Int -cacheLine = 32 +cacheLine = error "see Kernel_Config.thy" cacheLineBits :: Int -cacheLineBits = 5 +cacheLineBits = error "see Kernel_Config.thy" diff --git a/spec/haskell/src/SEL4/Machine/Hardware/ARM/Sabre.hs b/spec/haskell/src/SEL4/Machine/Hardware/ARM/Sabre.hs index 9ffd5ee5eb..601c9c5dc4 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/ARM/Sabre.hs +++ b/spec/haskell/src/SEL4/Machine/Hardware/ARM/Sabre.hs @@ -161,9 +161,8 @@ cacheInvalidateL2RangeCallback _ _ _ = return () cacheCleanL2RangeCallback :: Ptr CallbackData -> PAddr -> PAddr -> IO () cacheCleanL2RangeCallback _ _ _ = return () --- FIXME: This is not correct now, we do not have l2cc interface abstracted. 
cacheLine :: Int -cacheLine = 32 +cacheLine = error "see Kernel_Config.thy" cacheLineBits :: Int -cacheLineBits = 5 +cacheLineBits = error "see Kernel_Config.thy" diff --git a/spec/haskell/src/SEL4/Machine/Hardware/ARM/TK1.hs b/spec/haskell/src/SEL4/Machine/Hardware/ARM/TK1.hs index 95f433f5b4..91197e9a35 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/ARM/TK1.hs +++ b/spec/haskell/src/SEL4/Machine/Hardware/ARM/TK1.hs @@ -167,9 +167,8 @@ cacheInvalidateL2RangeCallback _ _ _ = return () cacheCleanL2RangeCallback :: Ptr CallbackData -> PAddr -> PAddr -> IO () cacheCleanL2RangeCallback _ _ _ = return () --- For the ARM1136 cacheLine :: Int -cacheLine = 64 +cacheLine = error "see Kernel_Config.thy" cacheLineBits :: Int -cacheLineBits = 6 +cacheLineBits = error "see Kernel_Config.thy" diff --git a/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy b/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy index ab6dfd5121..0e5724b680 100644 --- a/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy +++ b/spec/machine/AARCH64/Arch_Kernel_Config_Lemmas.thy @@ -117,5 +117,16 @@ lemma maxIRQ_1_plus_eq_Suc_machine[simp]: "unat (1 + Kernel_Config.maxIRQ :: machine_word) = Suc Kernel_Config.maxIRQ" by (simp add: Kernel_Config.maxIRQ_def) + +(* cacheLineBits conditions *) + +(* Folding cacheLineBits_val in C functions only works reliably if cacheLineBits is not 1 and + not so large that it conflicts with other values used inside cache ops. + 12 is ptBits, which is only available after ExecSpec. Anything > 1 and at most ptBits + works. *) +lemma cacheLineBits_sanity: + "cacheLineBits \ {2..12}" + by (simp add: cacheLineBits_def Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def) + end end diff --git a/spec/machine/AARCH64/Platform.thy b/spec/machine/AARCH64/Platform.thy index de18040238..095cc41d9d 100644 --- a/spec/machine/AARCH64/Platform.thy +++ b/spec/machine/AARCH64/Platform.thy @@ -52,6 +52,9 @@ abbreviation (input) "fromPAddr \ id" definition canonical_bit :: nat where "canonical_bit = 47" +definition cacheLineBits :: nat where + "cacheLineBits = CONFIG_L1_CACHE_LINE_SIZE_BITS" + definition kdevBase :: machine_word where "kdevBase = 0x000000FFFFE00000" diff --git a/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy b/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy index a842e107b3..49eff00372 100644 --- a/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy +++ b/spec/machine/ARM/Arch_Kernel_Config_Lemmas.thy @@ -119,5 +119,16 @@ lemma maxIRQ_1_plus_eq_Suc_machine[simp]: "unat (1 + maxIRQ :: machine_word) = Suc Kernel_Config.maxIRQ" by (simp add: Kernel_Config.maxIRQ_def) + +(* cacheLineBits conditions *) + +(* Folding cacheLineBits_val in C functions only works reliably if cacheLineBits is not 1 and + not so large that it conflicts with other values used inside cache ops. + 10 is ptBits, which is only available after ExecSpec. Anything > 1 and at most ptBits + works. 
*) +lemma cacheLineBits_sanity: + "cacheLineBits \ {2..10}" + by (simp add: cacheLineBits_def Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def) + end end diff --git a/spec/machine/ARM/Platform.thy b/spec/machine/ARM/Platform.thy index 79a6ad7bbe..501e1ed663 100644 --- a/spec/machine/ARM/Platform.thy +++ b/spec/machine/ARM/Platform.thy @@ -43,7 +43,7 @@ definition pageColourBits :: nat where "pageColourBits \ 2" definition cacheLineBits :: nat where - "cacheLineBits = 5" + "cacheLineBits = CONFIG_L1_CACHE_LINE_SIZE_BITS" definition cacheLine :: nat where "cacheLine = 2^cacheLineBits" diff --git a/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy b/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy index a1990c0f6b..4888a72e23 100644 --- a/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy +++ b/spec/machine/ARM_HYP/Arch_Kernel_Config_Lemmas.thy @@ -131,5 +131,16 @@ lemma maxIRQ_le_mask_irq_len: using le_maxIRQ_machine_less_irqBits_val by (fastforce simp add: word_le_nat_alt word_less_nat_alt irq_len_val mask_def) + +(* cacheLineBits conditions *) + +(* Folding cacheLineBits_val in C functions only works reliably if cacheLineBits is not 1 and + not so large that it conflicts with other values used inside cache ops. + 12 is ptBits, which is only available after ExecSpec. Anything > 1 and at most ptBits + works. *) +lemma cacheLineBits_sanity: + "cacheLineBits \ {2..12}" + by (simp add: cacheLineBits_def Kernel_Config.CONFIG_L1_CACHE_LINE_SIZE_BITS_def) + end end diff --git a/spec/machine/ARM_HYP/Platform.thy b/spec/machine/ARM_HYP/Platform.thy index 1bf44f827b..666c031b67 100644 --- a/spec/machine/ARM_HYP/Platform.thy +++ b/spec/machine/ARM_HYP/Platform.thy @@ -43,7 +43,7 @@ definition pageColourBits :: nat where "pageColourBits \ 2" definition cacheLineBits :: nat where - "cacheLineBits = 6" + "cacheLineBits = CONFIG_L1_CACHE_LINE_SIZE_BITS" definition cacheLine :: nat where "cacheLine = 2^cacheLineBits" diff --git a/spec/machine/X64/Platform.thy b/spec/machine/X64/Platform.thy index eef07b6630..6113622eee 100644 --- a/spec/machine/X64/Platform.thy +++ b/spec/machine/X64/Platform.thy @@ -45,14 +45,6 @@ definition pptrUserTop :: word64 where "pptrUserTop = 0x00007fffffffffff" -definition - cacheLineBits :: nat where - "cacheLineBits = 5" - -definition - cacheLine :: nat where - "cacheLine = 2^cacheLineBits" - definition ptrFromPAddr :: "paddr \ word64" where "ptrFromPAddr paddr \ paddr + pptrBase"
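
Note on the cache-range proofs in this patch: the ccorres lemmas for cleanCacheRange_PoU, invalidateCacheRange_RAM, and branchFlushRange all reason about a loop that visits one cache line at a time; lineStart rounds an address down to its line, and cache_range_lineIndex_helper connects the C loop index to that enumeration. A rough Haskell sketch of the shape of these definitions (simplified and partly hypothetical: monadic plumbing, the physical-address leg, and the exact bounds of the real cacheRangeOp are omitted, and cacheLineBits = 6 is only an assumed example value):

import Data.Bits (shiftL, shiftR)
import Data.Word (Word64)

cacheLineBits :: Int
cacheLineBits = 6  -- assumed example; this patch makes the real value configurable

-- Round an address down to the start of its cache line (lineStart in the spec).
lineStart :: Word64 -> Word64
lineStart addr = (addr `shiftR` cacheLineBits) `shiftL` cacheLineBits

-- The line-aligned addresses a range operation visits, one cache line apart.
lineAddresses :: Word64 -> Word64 -> [Word64]
lineAddresses start end =
  [lineStart start, lineStart start + lineSize .. lineStart end]
  where lineSize = 1 `shiftL` cacheLineBits

For example, lineAddresses 0x105 0x186 evaluates to [0x100, 0x140, 0x180]; the precondition w1 && mask cacheLineBits = w3 && mask cacheLineBits keeps exactly this decomposition in sync between the virtual and physical legs of the loop.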
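The addrFromPPtr_mask_SuperSection / ptrFromPAddr_mask_SuperSection lemmas and their cacheLineBits corollaries rest on one arithmetic fact: pptrBaseOffset is aligned far above cacheLineBits, so adding or subtracting it cannot disturb the low bits a mask selects. A minimal executable sketch of that fact (the 1 GiB-aligned offset is an assumed stand-in for the real platform constant):

import Data.Bits ((.&.), shiftL)
import Data.Word (Word64)

-- Assumed stand-in for the platform constant; only its alignment matters here.
pptrBaseOffset :: Word64
pptrBaseOffset = 0x40000000

mask :: Int -> Word64
mask n = (1 `shiftL` n) - 1

addrFromPPtr :: Word64 -> Word64
addrFromPPtr ptr = ptr - pptrBaseOffset

-- Holds for every n up to the alignment of pptrBaseOffset (30 in this sketch):
-- subtracting an offset whose low n bits are zero leaves the low n bits of ptr intact.
maskCommutes :: Int -> Word64 -> Bool
maskCommutes n ptr = (addrFromPPtr ptr .&. mask n) == (ptr .&. mask n)

Since cacheLineBits is well below that alignment, maskCommutes holds in particular for n = cacheLineBits, which is what addrFromPPtr_mask_cacheLineBits and ptrFromPAddr_mask_cacheLineBits record as [simp] rules.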
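On the Haskell side the per-platform constants are deliberately stubbed out with error rather than updated, because Kernel_Config.thy is now the single source of truth. Were the Haskell spec to consume the configuration directly, the intended shape would be roughly the following (a sketch under assumptions: the name configL1CacheLineSizeBits and the value 6 are illustrative, not part of this patch):

-- Assumed stand-in for CONFIG_L1_CACHE_LINE_SIZE_BITS from Kernel_Config.thy.
configL1CacheLineSizeBits :: Int
configL1CacheLineSizeBits = 6

cacheLineBits :: Int
cacheLineBits = configL1CacheLineSizeBits

-- The invariant the Isabelle side keeps by definition: cacheLine = 2^cacheLineBits.
cacheLine :: Int
cacheLine = 2 ^ cacheLineBits

-- Mirror of the cacheLineBits_sanity bound: greater than 1 and at most ptBits.
cacheLineBitsSane :: Int -> Bool
cacheLineBitsSane ptBits = cacheLineBits >= 2 && cacheLineBits <= ptBits

Here cacheLineBitsSane 10 corresponds to the ARM bound {2..10}, and cacheLineBitsSane 12 to the ARM_HYP and AARCH64 bound {2..12}.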