// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch-tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync or
	 * ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in our
	 * rx_channel.
	 */
	ivc_state_established = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack
};
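
/*
 * A complete reset handshake, as seen from endpoint A (a sketch derived
 * from the transition table documented before tegra_ivc_channel_notified()
 * below):
 *
 *	A: tx state = SYNC                                   -> notify B
 *	B: sees A in SYNC; clears counters; tx state = ACK   -> notify A
 *	A: sees B in ACK;  clears counters; tx state = EST   -> notify B
 *	B: sees A in EST;  tx state = EST                    -> notify A
 */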

/*
 * This structure is divided into two cache-aligned parts: the first is
 * written only through the tx_channel pointer, while the second is written
 * only through the rx_channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_channel_header {
	union {
		/* fields owned by the transmitting end */
		struct {
			uint32_t w_count;
			uint32_t state;
		};
		uint8_t w_align[TEGRA_IVC_ALIGN];
	};
	union {
		/* fields owned by the receiving end */
		uint32_t r_count;
		uint8_t r_align[TEGRA_IVC_ALIGN];
	};
};
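
/*
 * Resulting shared-memory layout of one channel, implied by the header
 * definition above and tegra_ivc_frame_addr() below (a sketch; nframes and
 * frame_size are fixed when tegra_ivc_init() is called):
 *
 *	+------------------------------+ <- base, TEGRA_IVC_ALIGN aligned
 *	| w_count, state (64B padded)  |    written by the transmitter only
 *	+------------------------------+
 *	| r_count        (64B padded)  |    written by the receiver only
 *	+------------------------------+
 *	| frame 0                      |
 *	| ...                          |
 *	| frame nframes - 1            |
 *	+------------------------------+
 */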

static inline void tegra_ivc_invalidate_counter(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					ulong offset)
{
	ulong base = ((ulong)h) + offset;
	invalidate_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline void tegra_ivc_flush_counter(struct tegra_ivc *ivc,
					   struct tegra_ivc_channel_header *h,
					   ulong offset)
{
	ulong base = ((ulong)h) + offset;
	flush_dcache_range(base, base + TEGRA_IVC_ALIGN);
}

static inline ulong tegra_ivc_frame_addr(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 uint32_t frame)
{
	BUG_ON(frame >= ivc->nframes);

	return ((ulong)h) + sizeof(struct tegra_ivc_channel_header) +
	       (ivc->frame_size * frame);
}

static inline void *tegra_ivc_frame_pointer(struct tegra_ivc *ivc,
					    struct tegra_ivc_channel_header *ch,
					    uint32_t frame)
{
	return (void *)tegra_ivc_frame_addr(ivc, ch, frame);
}

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					struct tegra_ivc_channel_header *h,
					unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	invalidate_dcache_range(base, base + ivc->frame_size);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *h,
					 unsigned frame)
{
	ulong base = tegra_ivc_frame_addr(ivc, h, frame);
	flush_dcache_range(base, base + ivc->frame_size);
}
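
/*
 * Convention for the cache helpers above: invalidate_dcache_range() is used
 * before reading data the remote end may have written (its counter and
 * incoming frames), while flush_dcache_range() is used after writing data
 * the remote end must observe (our counter, state and outgoing frames).
 * This is what keeps the protocol correct on the non-cache coherent
 * implementations mentioned above.
 */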

static inline int tegra_ivc_channel_empty(struct tegra_ivc *ivc,
					  struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with ACCESS_ONCE() to
	 * ensure that these checks use the same values.
	 */
	uint32_t w_count = ACCESS_ONCE(ch->w_count);
	uint32_t r_count = ACCESS_ONCE(ch->r_count);

	/*
	 * Perform an over-full check to prevent denial of service attacks where
	 * a server could be easily fooled into believing that there's an
	 * extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because it
	 * gives the impression that the channel has gone silent.
	 */
	if (w_count - r_count > ivc->nframes)
		return 1;

	return w_count == r_count;
}

static inline int tegra_ivc_channel_full(struct tegra_ivc *ivc,
					 struct tegra_ivc_channel_header *ch)
{
	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return (ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count)) >=
	       ivc->nframes;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->rx_channel->r_count) =
			ACCESS_ONCE(ivc->rx_channel->r_count) + 1;

	if (ivc->r_pos == ivc->nframes - 1)
		ivc->r_pos = 0;
	else
		ivc->r_pos++;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->tx_channel->w_count) =
			ACCESS_ONCE(ivc->tx_channel->w_count) + 1;

	if (ivc->w_pos == ivc->nframes - 1)
		ivc->w_pos = 0;
	else
		ivc->w_pos++;
}

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	ulong offset;

	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is not
	 * allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses to
	 * an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate empty
	 * or full.
	 */
	if (!tegra_ivc_channel_empty(ivc, ivc->rx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	return tegra_ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	ulong offset;

	if (ivc->tx_channel->state != ivc_state_established)
		return -ECONNRESET;

	if (!tegra_ivc_channel_full(ivc, ivc->tx_channel))
		return 0;

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);
	return tegra_ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}

static inline uint32_t tegra_ivc_channel_avail_count(struct tegra_ivc *ivc,
	struct tegra_ivc_channel_header *ch)
{
	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_channel_empty() for an explanation about
	 * special over-full considerations.
	 */
	return ACCESS_ONCE(ch->w_count) - ACCESS_ONCE(ch->r_count);
}
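
/*
 * Note that the unsigned subtraction above stays correct across 32-bit
 * counter wraparound. For example, with w_count = 2 just after wrapping
 * and r_count = 0xfffffffe, w_count - r_count = 4, i.e. four frames are
 * available to read.
 */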

int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_read(ivc);
	if (result < 0)
		return result;

	/*
	 * Order observation of w_pos potentially indicating new data before
	 * data read.
	 */
	mb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx_channel, ivc->r_pos);
	*frame = tegra_ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);

	return 0;
}

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	result = tegra_ivc_check_read(ivc);
	if (result)
		return result;

	tegra_ivc_advance_rx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_flush_counter(ivc, ivc->rx_channel, offset);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);

	if (tegra_ivc_channel_avail_count(ivc, ivc->rx_channel) ==
	    ivc->nframes - 1)
		ivc->notify(ivc);

	return 0;
}
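
/*
 * A minimal polling receive path built from the two calls above (a sketch;
 * ivc, buf and len are the caller's, and len must not exceed
 * ivc->frame_size):
 *
 *	void *frame;
 *	int err;
 *
 *	do {
 *		err = tegra_ivc_read_get_next_frame(ivc, &frame);
 *	} while (err == -ENOMEM);	// empty; poll until a frame arrives
 *	if (err)
 *		return err;		// e.g. -ECONNRESET: channel reset
 *	memcpy(buf, frame, len);
 *	return tegra_ivc_read_advance(ivc);
 */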

int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, void **frame)
{
	int result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	*frame = tegra_ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	return 0;
}

int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	ulong offset;
	int result;

	result = tegra_ivc_check_write(ivc);
	if (result)
		return result;

	tegra_ivc_flush_frame(ivc, ivc->tx_channel, ivc->w_pos);

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	mb();

	tegra_ivc_advance_tx(ivc);
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	mb();

	offset = offsetof(struct tegra_ivc_channel_header, r_count);
	tegra_ivc_invalidate_counter(ivc, ivc->tx_channel, offset);

	if (tegra_ivc_channel_avail_count(ivc, ivc->tx_channel) == 1)
		ivc->notify(ivc);

	return 0;
}
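
/*
 * The matching polling transmit path (a sketch under the same assumptions
 * as the receive example above):
 *
 *	void *frame;
 *	int err;
 *
 *	do {
 *		err = tegra_ivc_write_get_next_frame(ivc, &frame);
 *	} while (err == -ENOMEM);	// full; poll until a frame frees up
 *	if (err)
 *		return err;
 *	memcpy(frame, buf, len);
 *	return tegra_ivc_write_advance(ivc);
 */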

/*
 * ===============================================================
 *  IVC State Transition Table - see tegra_ivc_channel_notified()
 * ===============================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
int tegra_ivc_channel_notified(struct tegra_ivc *ivc)
{
	ulong offset;
	enum ivc_state peer_state;

	/* Copy the peer's state out of shared memory (our rx_channel). */
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_invalidate_counter(ivc, ivc->rx_channel, offset);
	peer_state = ACCESS_ONCE(ivc->rx_channel->state);

	if (peer_state == ivc_state_sync) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_sync &&
			peer_state == ivc_state_ack) {
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		mb();

		/*
		 * Reset tx_channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0;
		ivc->rx_channel->r_count = 0;

		ivc->w_pos = 0;
		ivc->r_pos = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else if (ivc->tx_channel->state == ivc_state_ack) {
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		mb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;
		offset = offsetof(struct tegra_ivc_channel_header, w_count);
		tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);
	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx_channel->state != ivc_state_established)
		return -EAGAIN;

	return 0;
}

void tegra_ivc_channel_reset(struct tegra_ivc *ivc)
{
	ulong offset;

	ivc->tx_channel->state = ivc_state_sync;
	offset = offsetof(struct tegra_ivc_channel_header, w_count);
	tegra_ivc_flush_counter(ivc, ivc->tx_channel, offset);
	ivc->notify(ivc);
}

static int check_ivc_params(ulong qbase1, ulong qbase2, uint32_t nframes,
			    uint32_t frame_size)
{
	int ret = 0;

	BUG_ON(offsetof(struct tegra_ivc_channel_header, w_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(offsetof(struct tegra_ivc_channel_header, r_count) &
	       (TEGRA_IVC_ALIGN - 1));
	BUG_ON(sizeof(struct tegra_ivc_channel_header) &
	       (TEGRA_IVC_ALIGN - 1));

	if ((uint64_t)nframes * (uint64_t)frame_size >= 0x100000000) {
		pr_err("tegra_ivc: nframes * frame_size overflows\n");
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if ((qbase1 & (TEGRA_IVC_ALIGN - 1)) ||
	    (qbase2 & (TEGRA_IVC_ALIGN - 1))) {
		pr_err("tegra_ivc: channel start not aligned\n");
		return -EINVAL;
	}

	if (frame_size & (TEGRA_IVC_ALIGN - 1)) {
		pr_err("tegra_ivc: frame size not adequately aligned\n");
		return -EINVAL;
	}

	if (qbase1 < qbase2) {
		if (qbase1 + frame_size * nframes > qbase2)
			ret = -EINVAL;
	} else {
		if (qbase2 + frame_size * nframes > qbase1)
			ret = -EINVAL;
	}

	if (ret) {
		pr_err("tegra_ivc: queue regions overlap\n");
		return ret;
	}

	return 0;
}
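
/*
 * Illustrative values that satisfy the checks above (hypothetical, not
 * taken from any board): nframes = 16 and frame_size = 128 give a
 * 2048-byte payload area per queue, so qbase1 and qbase2 must each be
 * 64-byte aligned and lie at least frame_size * nframes = 2048 bytes
 * apart to pass the overlap check.
 */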

int tegra_ivc_init(struct tegra_ivc *ivc, ulong rx_base, ulong tx_base,
		   uint32_t nframes, uint32_t frame_size,
		   void (*notify)(struct tegra_ivc *))
{
	int ret;

	if (!ivc)
		return -EINVAL;

	ret = check_ivc_params(rx_base, tx_base, nframes, frame_size);
	if (ret)
		return ret;

	ivc->rx_channel = (struct tegra_ivc_channel_header *)rx_base;
	ivc->tx_channel = (struct tegra_ivc_channel_header *)tx_base;
	ivc->w_pos = 0;
	ivc->r_pos = 0;
	ivc->nframes = nframes;
	ivc->frame_size = frame_size;
	ivc->notify = notify;

	return 0;
}
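
/*
 * Putting it together: a minimal bring-up sequence for one channel (a
 * sketch; rx_base, tx_base, the queue geometry and the doorbell in
 * notify_peer() are hypothetical and would normally come from the device
 * tree and the mailbox driver):
 *
 *	static void notify_peer(struct tegra_ivc *ivc)
 *	{
 *		// ring the doorbell/mailbox the remote end listens on
 *	}
 *
 *	struct tegra_ivc ivc;
 *	int err;
 *
 *	err = tegra_ivc_init(&ivc, rx_base, tx_base, 16, 128, notify_peer);
 *	if (err)
 *		return err;
 *	tegra_ivc_channel_reset(&ivc);
 *	while (tegra_ivc_channel_notified(&ivc))
 *		;	// repeat on each peer notification until it returns 0
 */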