phy_cdc: Fix synchronization behavior

For low ratios between the two clock frequencies (a system clock much
slower than the PHY clock), the slow clock domain would receive
incorrect data, because the fast (PHY) clock domain's data output would
already have shifted in the first bit of the next byte by the time the
slow (system) clock domain sampled it.

This fix registers the data in the source clock domain instead of the
destination clock domain. This is safe because the valid strobe is
synchronized in a way that guarantees the latched data has been stable
for a while by the time the strobe asserts in the destination domain.

Additionally, `phy_out.rx_data` and `phy_out.rx_data_valid` are no
longer registered in the destination (system) clock domain, as doing so
would now only add unnecessary latency. The old registration was also
broken as it stood: `rx_active` and `rx_error` would have had to be
registered as well to keep all timings in sync.
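
Condensed from the hunks below, the resulting pattern looks roughly
like this (a sketch of the relevant lines only, not a compilable unit):

```vhdl
-- PHY (source) clock domain: latch the received byte the moment the PHY
-- flags it valid, so it stays stable while the strobe crosses clock domains.
rxdff : process(phy_clk, rst) is
begin
    if rst then
        rx_data_i <= (others => '0');
    elsif rising_edge(phy_clk) then
        if phy_phy_out.rx_data_valid then
            rx_data_i <= phy_phy_out.rx_data;
        end if;
    end if;
end process rxdff;

-- System (destination) clock domain: only the one-cycle valid strobe goes
-- through the cdc_strobe synchronizer (driving phy_out.rx_data_valid); the
-- latched byte can be used as-is because it has been stable for at least
-- the synchronizer delay by the time the strobe appears.
phy_out.rx_data <= rx_data_i;
```
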
Markus Koch 2024-07-06 17:58:11 +02:00
parent da7e329939
commit 943febcb99


@@ -44,7 +44,7 @@ architecture rtl of trashernet_phy_cdc is
signal phy_phy_in : phy_in_t;
-- Helper signals
signal rx_data_valid_i : std_logic;
signal rx_data_i : byte;
begin
assert F_CLK_PHY > 2 * F_CLK report "F_CLK_PHY must be at least 2x F_CLK" severity failure;
@@ -52,6 +52,8 @@ begin
-- -------------------------------------------------------------------------
-- Drives: PHY clock domain
-- -------------------------------------------------------------------------
-- Reset synchronizer for PHY
rstsync : process(phy_clk, rst) is
begin
if rst then
@@ -61,6 +63,7 @@ begin
end if;
end process rstsync;
-- Operate Trashernet in PHY clock domain
trashernet_phy_inst : entity work.trashernet_phy
generic map(
F_CLK => F_CLK_PHY
@@ -75,6 +78,22 @@ begin
tx_n => tx_n
);
-- Latch data in PHY clock domain when valid is strobed
-- If the other clock domain is slower than the time it takes for the strobe to synchronize,
-- `phy_phy_out.rx_data` will already have shifted in the next bit and no longer be valid.
-- Therefore, we need to latch it here.
rxdff : process(phy_clk, rst) is
begin
if rst then
rx_data_i <= (others => '0');
elsif rising_edge(phy_clk) then
if phy_phy_out.rx_data_valid then
rx_data_i <= phy_phy_out.rx_data;
end if;
end if;
end process rxdff;
synchronizer_txen_inst : entity work.synchronizer
generic map(
SIZE => 5
@@ -98,20 +117,10 @@ begin
a_in => phy_phy_out.rx_data_valid,
b_clk => clk,
b_rst => rst,
b_out => rx_data_valid_i
b_out => phy_out.rx_data_valid
);
rxdvff : process(clk, rst) is
begin
if rst then
phy_out.rx_data_valid <= '0';
phy_out.rx_data <= (others => '0'); -- Needed for yosys to compile
elsif rising_edge(clk) then
phy_out.rx_data_valid <= rx_data_valid_i;
phy_out.rx_data <= phy_phy_out.rx_data; -- Data should be stable after the time it takes the _valid signal to go through the synchronizer
end if;
end process rxdvff;
phy_out.rx_data <= rx_data_i; -- No need to synchronize in new clock domain as latched data has been stable for a while thanks to the delay in the _valid synchronizer
cdc_strobe_rxer_inst : entity work.cdc_strobe
port map(