/*
 * NOTE(review): the original license/copyright header (file lines 1-36) was
 * lost during extraction -- only line-number residue remained here.
 * Restore the BSD license header from the upstream TI SYS/BIOS source.
 */
package ti.sysbios.family.arm.a8;

import xdc.rov.ViewInfo;

import xdc.runtime.Assert;

/*!
 *  ======== Cache ========
 *  ARM Cache Module
 *
 *  This module manages the data and instruction caches on Cortex A8
 *  processors.
 *  It provides a list of functions that perform cache operations. The
 *  functions operate on a per cache line basis except for the 'All'
 *  functions which operate on the entire cache specified. Any address
 *  that is not aligned to a cache line gets rounded down to the address
 *  of the nearest cache line.
 *
 *  The L1 data and program caches as well as the L2 cache are enabled
 *  by default early during the startup sequence (prior to any
 *  Module_startup()s).
 *  Data caching requires the MMU to be enabled and the cacheable
 *  attribute of the section/page descriptor for a corresponding
 *  memory region to be enabled.
 *  Program caching does not require the MMU to be enabled and therefore
 *  occurs when the L1 program cache is enabled.
 *
 *  Note: See the {@link ti.sysbios.family.arm.a8.Mmu} module for
 *  information about the MMU.
 *
 *  Unconstrained Functions
 *  All functions
 *
 *  @p(html)
 *  <h3> Calling Context </h3>
 *  <table border="1" cellpadding="3">
 *    <colgroup span="1"></colgroup> <colgroup span="5" align="center">
 *    </colgroup>
 *
 *    <tr><th> Function </th><th> Hwi </th><th> Swi </th>
 *    <th> Task </th><th> Main </th><th> Startup </th></tr>
 *    <!--                                                  -->
 *    <tr><td> {@link #disable}     </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #enable}      </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #inv}         </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #invL1dAll}   </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #invL1pAll}   </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #wait}        </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #wb}          </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #wbInv}       </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #wbInvL1dAll} </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #wbL1dAll}    </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #lock}        </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td> {@link #unlock}      </td><td> Y </td><td> Y </td>
 *    <td> Y </td><td> Y </td><td> Y </td></tr>
 *    <tr><td colspan="6"> Definitions: <br />
 *       <ul>
 *         <li> <b>Hwi</b>: API is callable from a Hwi thread. </li>
 *         <li> <b>Swi</b>: API is callable from a Swi thread. </li>
 *         <li> <b>Task</b>: API is callable from a Task thread. </li>
 *         <li> <b>Main</b>: API is callable during any of these phases: </li>
 *           <ul>
 *             <li> In your module startup after this module is started
 *               (e.g. Cache_Module_startupDone() returns TRUE). </li>
 *             <li> During xdc.runtime.Startup.lastFxns. </li>
 *             <li> During main().</li>
 *             <li> During BIOS.startupFxns.</li>
 *           </ul>
 *         <li> <b>Startup</b>: API is callable during any of these phases:</li>
 *           <ul>
 *             <li> During xdc.runtime.Startup.firstFxns.</li>
 *             <li> In your module startup before this module is started
 *               (e.g. Cache_Module_startupDone() returns FALSE).</li>
 *           </ul>
 *       </ul>
 *    </td></tr>
 *
 *  </table>
 *  @p
 */

module Cache inherits ti.sysbios.interfaces.ICache
{
    /*!
     *  Size of L1 data cache Line
     */
    const UInt sizeL1dCacheLine = 64;

    /*!
     *  Size of L1 program cache Line
     */
    const UInt sizeL1pCacheLine = 64;

    /*!
     *  Size of L2 cache Line
     */
    const UInt sizeL2CacheLine = 64;

    /*!
     *  ======== ModView ========
     *  @_nodoc
     */
    metaonly struct CacheInfoView {
        String      cache;
        SizeT       cacheSize;
        SizeT       lineSize;
        UInt        ways;
        SizeT       waySize;
    };

    /*!
     *  ======== WayInfoView ========
     *  @_nodoc
     */
    metaonly struct WayInfoView {
        UInt        number;
        Bool        locked;
        Ptr         baseAddress;
    };

    /*!
     *  ======== rovViewInfo ========
     *  @_nodoc
     */
    @Facet
    metaonly config ViewInfo.Instance rovViewInfo =
        ViewInfo.create({
            viewMap: [
                ['Cache Info', { type: ViewInfo.MODULE_DATA,
                                 viewInitFxn: 'viewInitCacheInfo',
                                 structName: 'CacheInfoView'}],
                ['L2 Way Info', { type: ViewInfo.MODULE_DATA,
                                  viewInitFxn: 'viewInitWays',
                                  structName: 'WayInfoView'}]
            ]
        });

    /*! Asserted in Cache_lock */
    config Assert.Id A_badBlockLength = {
        msg: "A_badBlockLength: Block length too large. Must be <= L2 way size."
    };

    /*! Asserted in Cache_lock */
    config Assert.Id A_blockCrossesPage = {
        msg: "A_blockCrossesPage: Memory block crosses L2 way page boundary."
    };

    /*!
     *  Enable L1 and L2 data and program caches.
     *
     *  To enable a subset of the caches, set this parameter
     *  to 'false' and call Cache_enable() within main, passing it only
     *  the {@link Cache#Type Cache_Type(s)} to be enabled.
     *
     *  Data caching requires the MMU and the memory section/page
     *  descriptor cacheable attribute to be enabled.
     */
    config Bool enableCache = true;

    /*!
     *  Unlock all 8 L2 cache ways at startup, default is true.
     *
     *  Ordinarily, the L2 cache ways should all be unlocked at
     *  system startup.
     *
     *  During development using CCS, if the application exits
     *  while L2 cache ways are locked, the soft-reset function
     *  DOES NOT unlock the L2 cache ways. To overcome this problem,
     *  the L2 cache ways are unlocked during Cache module startup.
     *
     *  If for any reason this behavior is undesirable, setting this
     *  config parameter to false will disable the automatic unlocking
     *  of the L2 cache ways.
     */
    config Bool unlockL2Cache = true;

    /*!
     *  Enable Branch Prediction at startup, default is true.
     *
     *  This flag controls whether Branch Prediction should be automatically
     *  enabled or disabled during system startup.
     *
     *  @a(NOTE)
     *  Upon reset, the A8's Program Flow Prediction (Branch Prediction)
     *  feature is disabled.
     */
    config Bool branchPredictionEnabled = true;

    /*! @_nodoc
     *  ======== getEnabled ========
     *  Get the 'type' bitmask of cache(s) enabled.
     */
    @DirectCall
    Bits16 getEnabled();

    /*!
     *  ======== invL1dAll ========
     *  Invalidate all of L1 data cache.
     *
     *  This function should be used with caution. In general, the
     *  L1 data cache may contain some stack variable or valid data
     *  that should not be invalidated. This function should be used
     *  only when all contents of L1 data cache is unwanted.
     */
    @DirectCall
    Void invL1dAll();

    /*!
     *  ======== invL1pAll ========
     *  Invalidate all of L1 program cache.
     */
    @DirectCall
    Void invL1pAll();

    /*!
     *  ======== lock ========
     *  Loads and locks a memory block into the L2 cache.
     *
     *  A block of memory is loaded into the L2 cache and
     *  a corresponding L2 cache "way" is locked.
     *
     *  The memory block is loaded into cache one L2 cache line at a time.
     *
     *  The returned key is a bitmask of the L2 cache "way"
     *  used to lock the memory block.
     *  This key should be passed in
     *  a subsequent call to {@link #unlock Cache_unlock()}
     *  if the memory block is to be unlocked.
     *
     *  If the key returned is zero, then the lock operation failed
     *  due to insufficient cache "ways" remaining to perform the operation.
     *  The locking algorithm requires at least two unlocked cache ways:
     *  one for the memory block, and one for the locking code itself.
     *
     *  As the A8 L2 cache is always an 8 way cache, locking a cache way
     *  consumes 1/8 of the total L2 cache, regardless of the actual memory
     *  block size. For instance, if the size of L2 cache is 256K bytes,
     *  locking ANY size memory block into a way will tie up 32K bytes
     *  of L2 cache.
     *
     *  The byteCnt argument must be less than or equal to an L2 "way"
     *  size.
     *  Locking memory blocks larger than a way page size requires
     *  calling this API multiple times.
     *  An assert is generated if this rule is violated.
     *
     *  The memory block must not cross an L2 "way" page boundary.
     *  Locking memory blocks that cross way page boundaries requires
     *  calling this API multiple times.
     *  An assert is generated if this rule is violated.
     *
     *  Except for the normal L1 instruction cache behavior
     *  during code execution, the L1 instruction cache is
     *  unaffected by this API.
     *  The L1 data cache will be temporarily polluted by the contents
     *  of the referenced memory block.
     *
     *  @a(NOTE)
     *  Interrupts are disabled for the entire time the memory block
     *  is being loaded into cache. For this reason, use of this API
     *  is probably best at system initialization time
     *  (ie: within 'main()').
     *
     *  @param(blockPtr) start address of range to be locked
     *  @param(byteCnt)  number of bytes to be locked
     *  @b(returns)      key = bitmask of L2 cache "way" used
     */
    @DirectCall
    UInt lock(Ptr blockPtr, SizeT byteCnt);

    /*!
     *  ======== unlock ========
     *  Unlocks an L2 cache way.
     *
     *  Unlocks the L2 cache "way" locked by {@link #lock Cache_lock()}.
     *
     *  @a(NOTE)
     *  multiple L2 cache "ways" can be unlocked simultaneously by "or-ing"
     *  together the bitmasks returned from several invocations of Cache_lock().
     *
     *  @param(key) key returned by lock.
     */
    @DirectCall
    Void unlock(UInt key);

    /*!
     *  ======== enableBP ========
     *  Enable Branch Prediction
     *
     *  Calling this API will enable branch prediction.
     *
     *  @a(NOTE)
     *  Upon reset, the A8's Program Flow Prediction (Branch Prediction)
     *  feature is disabled.
     */
    @DirectCall
    Void enableBP();

    /*!
     *  ======== disableBP ========
     *  Disable Branch Prediction
     *
     *  Calling this API will disable branch prediction.
     *
     *  @a(NOTE)
     *  Upon reset, the A8's Program Flow Prediction (Branch Prediction)
     *  feature is disabled.
     */
    @DirectCall
    Void disableBP();

internal:

    /*!
     *  ======== startup ========
     *  startup function to enable cache early during climb-up
     */
    Void startup();

    /*!
     *  ======== disableL1d ========
     *  Disable L1 data cache
     *
     *  This function performs a write back invalidate all of
     *  L1 data cache before it disables the cache.
     */
    Void disableL1d();

    /*!
     *  ======== disableL1p ========
     *  Disable L1 Program cache
     *
     *  This function performs an invalidate all of L1 program cache
     *  before it disables the cache.
     */
    Void disableL1p();

    /*!
     *  ======== disableL2 ========
     *  Disable L2 Unified Cache
     */
    Void disableL2();

    /*!
     *  ======== enableL1d ========
     *  Enable L1 data cache.
     */
    Void enableL1d();

    /*!
     *  ======== enableL1p ========
     *  Enable L1 program cache.
     */
    Void enableL1p();

    /*!
     *  ======== enableL2 ========
     *  Enable L2 Unified Cache
     */
    Void enableL2();

    /*!
     *  ======== invL1d ========
     *  Invalidates range in L1 data cache.
     */
    Void invL1d(Ptr blockPtr, SizeT byteCnt, Bool wait);

    /*!
     *  ======== invL1p ========
     *  Invalidates range in L1 program cache.
     */
    Void invL1p(Ptr blockPtr, SizeT byteCnt, Bool wait);

    /*!
     *  ======== getL2AuxControlReg ========
     *  get current L2 Aux Control register contents
     */
    Bits32 getL2AuxControlReg();

    /*!
     *  ======== setL2AuxControlReg ========
     *  set L2 Aux Control register
     */
    Void setL2AuxControlReg(Bits32 wayMask);

    /*!
     *  ======== getLockdownReg ========
     *  get current L2 Cache lockdown register contents
     */
    Bits32 getLockdownReg();

    /*!
     *  ======== setLockdownReg ========
     *  set L2 Cache lockdown register
     */
    Void setLockdownReg(Bits32 wayMask);

    /*!
     *  ======== wayLoadLock ========
     *  lock a block of memory into the L2 cache way specified by 'wayBit'.
     */
    Void wayLoadLock(Ptr blockPtr, SizeT byteCnt, UInt wayBit);

    /*!
     *  ======== getCacheLevelInfo ========
     *  returns Cache Size Id Register of corresponding Cache level
     *
     *  level values
     *      0 = L1D
     *      1 = L1P
     *      2 = L2
     */
    Bits32 getCacheLevelInfo(UInt level);

    /* Module state: cache geometry snapshots and L2 way-lock bookkeeping. */
    struct Module_State {
        Bits32      l1dInfo;            /* L1D Cache Size Id Register value */
        Bits32      l1pInfo;            /* L1P Cache Size Id Register value */
        Bits32      l2Info;             /* L2 Cache Size Id Register value */
        SizeT       l2WaySize;          /* size in bytes of one L2 way */
        Bits32      lockRegister;       /* shadow of L2 lockdown register */
        Ptr         baseAddresses[8];   /* base address locked into each way */
    }
}