/*
 *  NOTE(review): lines 1-36 of the original file (the copyright/license
 *  header comment) were lost during extraction -- only their line numbers
 *  remain. Restore the header from the upstream SYS/BIOS source tree.
 */
package ti.sysbios.family.arm.a8;

import xdc.rov.ViewInfo;

import xdc.runtime.Assert;
/*!
 *  ======== Cache ========
 *  ARM Cache Module
 *
 *  This module manages the data and instruction caches on Cortex A8
 *  processors.
 *  It provides a list of functions that perform cache operations. The
 *  functions operate on a single cache line at a time, except for the
 *  'All' functions which operate on the entire cache specified. Any
 *  address that is not aligned to a cache line gets rounded down to
 *  the address of the nearest cache line.
 *
 *  The L1 data and program caches as well as the L2 cache are enabled
 *  by default early during the startup sequence (prior to any
 *  Module_startup()s).
 *  Data caching requires the MMU to be enabled and the cacheable
 *  attribute of the section/page descriptor for a corresponding
 *  memory region to be enabled.
 *  Program caching does not require the MMU to be enabled and therefore
 *  occurs as soon as the L1 program cache is enabled.
 *
 *  Note: See the {@link ti.sysbios.family.arm.a8.Mmu} module for
 *  information about the MMU.
 *
 *  Unconstrained Functions
 *  All functions
 *
 *  @p(html)
 *  <h3> Calling Context </h3>
 *  <table border="1" cellpadding="3">
 *    <colgroup span="1"></colgroup> <colgroup span="5" align="center">
 *    </colgroup>
 *
 *    <tr><th> Function </th><th>  Hwi   </th><th>  Swi   </th>
 *    <th>  Task  </th><th>  Main  </th><th>  Startup  </th></tr>
 *    <!--                                                   -->
 *    <tr><td> {@link #disable}     </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #enable}      </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #inv}         </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #invL1dAll}   </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #invL1pAll}   </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #wait}        </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #wb}          </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #wbInv}       </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #wbInvL1dAll} </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #wbL1dAll}    </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #lock}        </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td> {@link #unlock}      </td><td>   Y    </td><td>   Y    </td>
 *    <td>   Y    </td><td>   Y    </td><td>   Y    </td></tr>
 *    <tr><td colspan="6"> Definitions: <br />
 *       <ul>
 *         <li> <b>Hwi</b>: API is callable from a Hwi thread. </li>
 *         <li> <b>Swi</b>: API is callable from a Swi thread. </li>
 *         <li> <b>Task</b>: API is callable from a Task thread. </li>
 *         <li> <b>Main</b>: API is callable during any of these phases: </li>
 *           <ul>
 *             <li> In your module startup after this module is started
 *    (e.g. Cache_Module_startupDone() returns TRUE). </li>
 *             <li> During xdc.runtime.Startup.lastFxns. </li>
 *             <li> During main().</li>
 *             <li> During BIOS.startupFxns.</li>
 *           </ul>
 *         <li> <b>Startup</b>: API is callable during any of these phases:</li>
 *           <ul>
 *             <li> During xdc.runtime.Startup.firstFxns.</li>
 *             <li> In your module startup before this module is started
 *    (e.g. Cache_Module_startupDone() returns FALSE).</li>
 *           </ul>
 *       </ul>
 *    </td></tr>
 *
 *  </table>
 *  @p
 */

module Cache inherits ti.sysbios.interfaces.ICache
{
    /*!
     *  Size of L1 data cache line
     */
    const UInt sizeL1dCacheLine = 64;

    /*!
     *  Size of L1 program cache line
     */
    const UInt sizeL1pCacheLine = 64;

    /*!
     *  Size of L2 cache line
     */
    const UInt sizeL2CacheLine = 64;

    /*!
     *  ======== ModView ========
     *  @_nodoc
     *
     *  ROV view element describing one cache's geometry:
     *  total size, line size, number of ways, and way size.
     */
    metaonly struct CacheInfoView {
        String cache;
        SizeT cacheSize;
        SizeT lineSize;
        UInt ways;
        SizeT waySize;
    };

    /*!
     *  ======== WayInfoView ========
     *  @_nodoc
     *
     *  ROV view element describing one L2 cache way:
     *  its index, lock state, and the base address of the
     *  memory block locked into it (if any).
     */
    metaonly struct WayInfoView {
        UInt number;
        Bool locked;
        Ptr baseAddress;
    };

    /*!
     *  ======== rovViewInfo ========
     *  @_nodoc
     *
     *  ROV views: overall cache geometry and per-way L2 lock status.
     */
    @Facet
    metaonly config ViewInfo.Instance rovViewInfo =
        ViewInfo.create({
            viewMap: [
                ['Cache Info', { type: ViewInfo.MODULE_DATA,
                                 viewInitFxn: 'viewInitCacheInfo',
                                 structName: 'CacheInfoView'}],
                ['L2 Way Info', { type: ViewInfo.MODULE_DATA,
                                  viewInitFxn: 'viewInitWays',
                                  structName: 'WayInfoView'}]
            ]
        });

    /*! Asserted in Cache_lock when byteCnt exceeds the L2 way size */
    config Assert.Id A_badBlockLength = {
        msg: "A_badBlockLength: Block length too large. Must be <= L2 way size."
    };

    /*! Asserted in Cache_lock when the block spans an L2 way page boundary */
    config Assert.Id A_blockCrossesPage = {
        msg: "A_blockCrossesPage: Memory block crosses L2 way page boundary."
    };

    /*!
     *  Enable L1 and L2 data and program caches.
     *
     *  To enable a subset of the caches, set this parameter
     *  to 'false' and call Cache_enable() within main, passing it only
     *  the {@link Cache#Type Cache_Type(s)} to be enabled.
     *
     *  Data caching requires the MMU and the memory section/page
     *  descriptor cacheable attribute to be enabled.
     */
    config Bool enableCache = true;

    /*!
     *  Unlock all 8 L2 cache ways at startup, default is true.
     *
     *  Ordinarily, the L2 cache ways should all be unlocked at
     *  system startup.
     *
     *  During development using CCS, if the application exits
     *  while L2 cache ways are locked, the soft-reset function
     *  DOES NOT unlock the L2 cache ways. To overcome this problem,
     *  the L2 cache ways are unlocked during Cache module startup.
     *
     *  If for any reason this behavior is undesirable, setting this
     *  config parameter to false will disable the automatic unlocking
     *  of the L2 cache ways.
     */
    config Bool unlockL2Cache = true;

    /*!
     *  Enable Branch Prediction at startup, default is true.
     *
     *  This flag controls whether Branch Prediction should be automatically
     *  enabled or disabled during system startup.
     *
     *  @a(NOTE)
     *  Upon reset, the A8's Program Flow Prediction (Branch Prediction)
     *  feature is disabled.
     */
    config Bool branchPredictionEnabled = true;

    /*! @_nodoc
     *  ======== getEnabled ========
     *  Get the 'type' bitmask of cache(s) enabled.
     */
    Bits16 getEnabled();

    /*!
     *  ======== invL1dAll ========
     *  Invalidate all of L1 data cache.
     *
     *  This function should be used with caution. In general, the
     *  L1 data cache may contain some stack variables or valid data
     *  that should not be invalidated. This function should be used
     *  only when all contents of the L1 data cache are unwanted.
     */
    Void invL1dAll();

    /*!
     *  ======== invL1pAll ========
     *  Invalidate all of L1 program cache.
     */
    Void invL1pAll();

    /*!
     *  ======== lock ========
     *  Loads and locks a memory block into the L2 cache.
     *
     *  A block of memory is loaded into the L2 cache and
     *  a corresponding L2 cache "way" is locked.
     *
     *  The memory block is loaded into cache one L2 cache line at a time.
     *
     *  The returned key is a bitmask of the L2 cache "way"
     *  used to lock the memory block.
     *  This key should be passed in
     *  a subsequent call to {@link #unlock Cache_unlock()}
     *  if the memory block is to be unlocked.
     *
     *  If the key returned is zero, then the lock operation failed
     *  due to insufficient cache "ways" remaining to perform the operation.
     *  The locking algorithm requires at least two unlocked cache ways:
     *  one for the memory block, and one for the locking code itself.
     *
     *  As the A8 L2 cache is always an 8 way cache, locking a cache way
     *  consumes 1/8 of the total L2 cache, regardless of the actual memory
     *  block size. For instance, if the size of L2 cache is 256K bytes,
     *  locking ANY size memory block into a way will tie up 32K bytes
     *  of L2 cache.
     *
     *  The byteCnt argument must be less than or equal to an L2 "way"
     *  size.
     *  Locking memory blocks larger than a way page size requires
     *  calling this API multiple times.
     *  An assert ({@link #A_badBlockLength}) is generated if this rule
     *  is violated.
     *
     *  The memory block must not cross an L2 "way" page boundary.
     *  Locking memory blocks that cross way page boundaries requires
     *  calling this API multiple times.
     *  An assert ({@link #A_blockCrossesPage}) is generated if this rule
     *  is violated.
     *
     *  Except for the normal L1 instruction cache behavior
     *  during code execution, the L1 instruction cache is
     *  unaffected by this API.
     *  The L1 data cache will be temporarily polluted by the contents
     *  of the referenced memory block.
     *
     *  @a(NOTE)
     *  Interrupts are disabled for the entire time the memory block
     *  is being loaded into cache. For this reason, use of this API
     *  is probably best at system initialization time
     *  (ie: within 'main()').
     *
     *  @param(blockPtr) start address of range to be locked
     *  @param(byteCnt)  number of bytes to be locked
     *  @b(returns)      key = bitmask of L2 cache "way" used
     */
    UInt lock(Ptr blockPtr, SizeT byteCnt);

    /*!
     *  ======== unlock ========
     *  Unlocks an L2 cache way.
     *
     *  Unlocks the L2 cache "way" locked by {@link #lock Cache_lock()}.
     *
     *  @a(NOTE)
     *  multiple L2 cache "ways" can be unlocked simultaneously by "or-ing"
     *  together the bitmasks returned from several invocations of Cache_lock().
     *
     *  @param(key) key returned by lock.
     */
    Void unlock(UInt key);

    /*!
     *  ======== enableBP ========
     *  Enable Branch Prediction
     *
     *  Calling this API will enable branch prediction.
     *
     *  @a(NOTE)
     *  Upon reset, the A8's Program Flow Prediction (Branch Prediction)
     *  feature is disabled.
     */
    Void enableBP();

    /*!
     *  ======== disableBP ========
     *  Disable Branch Prediction
     *
     *  Calling this API will disable branch prediction.
     *
     *  @a(NOTE)
     *  Upon reset, the A8's Program Flow Prediction (Branch Prediction)
     *  feature is disabled.
     */
    Void disableBP();

internal:

    /*!
     *  ======== startup ========
     *  startup function to enable caches early during the startup
     *  ("climb-up") sequence, prior to any Module_startup()s
     */
    Void startup();

    /*!
     *  ======== disableL1d ========
     *  Disable L1 data cache
     *
     *  This function performs a write back invalidate all of
     *  L1 data cache before it disables the cache.
     */
    Void disableL1d();

    /*!
     *  ======== disableL1p ========
     *  Disable L1 Program cache
     *
     *  This function performs an invalidate all of L1 program cache
     *  before it disables the cache.
     */
    Void disableL1p();

    /*!
     *  ======== disableL2 ========
     *  Disable L2 Unified Cache
     */
    Void disableL2();

    /*!
     *  ======== enableL1d ========
     *  Enable L1 data cache.
     */
    Void enableL1d();

    /*!
     *  ======== enableL1p ========
     *  Enable L1 program cache.
     */
    Void enableL1p();

    /*!
     *  ======== enableL2 ========
     *  Enable L2 Unified Cache
     */
    Void enableL2();

    /*!
     *  ======== invL1d ========
     *  Invalidates range in L1 data cache.
     *
     *  When 'wait' is true, the function does not return until the
     *  invalidate operation has completed.
     */
    Void invL1d(Ptr blockPtr, SizeT byteCnt, Bool wait);

    /*!
     *  ======== invL1p ========
     *  Invalidates range in L1 program cache.
     *
     *  When 'wait' is true, the function does not return until the
     *  invalidate operation has completed.
     */
    Void invL1p(Ptr blockPtr, SizeT byteCnt, Bool wait);

    /*!
     *  ======== getL2AuxControlReg ========
     *  get current L2 Aux Control register contents
     */
    Bits32 getL2AuxControlReg();

    /*!
     *  ======== setL2AuxControlReg ========
     *  set L2 Aux Control register
     */
    Void setL2AuxControlReg(Bits32 wayMask);

    /*!
     *  ======== getLockdownReg ========
     *  get current L2 Cache lockdown register contents
     */
    Bits32 getLockdownReg();

    /*!
     *  ======== setLockdownReg ========
     *  set L2 Cache lockdown register
     */
    Void setLockdownReg(Bits32 wayMask);

    /*!
     *  ======== wayLoadLock ========
     *  lock a block of memory into the L2 cache way specified by 'wayBit'.
     */
    Void wayLoadLock(Ptr blockPtr, SizeT byteCnt, UInt wayBit);

    /*!
     *  ======== getCacheLevelInfo ========
     *  returns Cache Size Id Register of corresponding Cache level
     *
     *  level values
     *      0 = L1D
     *      1 = L1P
     *      2 = L2
     */
    Bits32 getCacheLevelInfo(UInt level);

    /*! Module state shared by the cache support functions above */
    struct Module_State {
        Bits32 l1dInfo;          /* L1D cache level info (see getCacheLevelInfo) */
        Bits32 l1pInfo;          /* L1P cache level info (see getCacheLevelInfo) */
        Bits32 l2Info;           /* L2 cache level info (see getCacheLevelInfo) */
        SizeT l2WaySize;         /* size of one L2 cache way */
        Bits32 lockRegister;     /* shadow of the L2 lockdown register state */
        Ptr baseAddresses[8];    /* base address locked into each of the 8 L2 ways */
    }
}