pd/
migrate.rs

1//! Logic for handling chain upgrades.
2//!
3//! When consensus-breaking changes are made to the Penumbra software,
4//! node operators must coordinate to perform a chain upgrade.
5//! This module declares how local `pd` state should be altered, if at all,
6//! in order to be compatible with the network post-chain-upgrade.
7mod mainnet1;
8mod mainnet2;
9mod mainnet3;
10mod reset_halt_bit;
11mod simple;
12mod testnet72;
13mod testnet74;
14mod testnet76;
15mod testnet77;
16mod testnet78;
17
18use anyhow::{ensure, Context};
19use penumbra_sdk_governance::StateReadExt;
20use penumbra_sdk_sct::component::clock::EpochRead;
21use std::path::{Path, PathBuf};
22use tracing::instrument;
23
24use cnidarium::Storage;
25use penumbra_sdk_app::SUBSTORE_PREFIXES;
26
27use flate2::write::GzEncoder;
28use flate2::Compression;
29use std::fs::File;
30
/// The kind of migration that should be performed.
#[derive(Debug)]
pub enum Migration {
    /// Set the chain's halt bit to `false`.
    ReadyToStart,
    /// A simple migration: adds a key to the consensus state.
    /// This is useful for testing upgrade mechanisms, including in production.
    SimpleMigration,
    /// Testnet-72 migration:
    /// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index.
    Testnet72,
    /// Testnet-74 migration:
    /// - Update the base liquidity index to order routable pairs by descending liquidity
    /// - Update arb executions to include the amount of filled input in the output
    /// - Add `AuctionParameters` to the consensus state
    Testnet74,
    /// Testnet-76 migration:
    /// - Heal the auction component's VCB tally.
    /// - Update FMD parameters to new protobuf structure.
    Testnet76,
    /// Testnet-77 migration:
    /// - Reset the halt bit
    Testnet77,
    /// Testnet-78 migration:
    /// - Truncate various user-supplied `String` fields to a maximum length.
    /// - Populate the DEX NV price indexes with position data
    Testnet78,
    /// Mainnet-1 migration:
    /// - Restore IBC packet commitments for improperly handled withdrawal attempts
    Mainnet1,
    /// Mainnet-2 migration:
    /// - no-op
    Mainnet2,
    /// Mainnet-3 migration:
    /// - no-op
    Mainnet3,
}
68
69impl Migration {
70    #[instrument(skip(pd_home, genesis_start, force))]
71    pub async fn migrate(
72        &self,
73        pd_home: PathBuf,
74        comet_home: Option<PathBuf>,
75        genesis_start: Option<tendermint::time::Time>,
76        force: bool,
77    ) -> anyhow::Result<()> {
78        tracing::debug!(
79            ?pd_home,
80            ?genesis_start,
81            ?force,
82            "preparing to run migration!"
83        );
84        let rocksdb_dir = pd_home.join("rocksdb");
85        let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
86        ensure!(
87            storage.latest_snapshot().is_chain_halted().await || force,
88            "to run a migration, the chain halt bit must be set to `true` or use the `--force` cli flag"
89        );
90
91        // Assert that the local chain state version is not corrupted, see `v0.80.10` release notes.
92        let latest_version = storage.latest_version();
93        let block_height = storage.latest_snapshot().get_block_height().await?;
94        ensure!(
95            latest_version == block_height || force,
96            "local chain state version is corrupted: {} != {}",
97            latest_version,
98            block_height
99        );
100
101        tracing::info!("started migration");
102
103        // If this is `ReadyToStart`, we need to reset the halt bit and return early.
104        if let Migration::ReadyToStart = self {
105            reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
106            return Ok(());
107        }
108
109        match self {
110            Migration::SimpleMigration => {
111                simple::migrate(storage, pd_home.clone(), genesis_start).await?
112            }
113            Migration::Mainnet1 => {
114                mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
115            }
116            Migration::Mainnet2 => {
117                mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
118            }
119            Migration::Mainnet3 => {
120                mainnet3::migrate(storage, pd_home.clone(), genesis_start).await?;
121            }
122            // We keep historical migrations around for now, this will help inform an abstracted
123            // design. Feel free to remove it if it's causing you trouble.
124            _ => unimplemented!("the specified migration is unimplemented"),
125        }
126
127        if let Some(comet_home) = comet_home {
128            let genesis_path = pd_home.join("genesis.json");
129            migrate_comet_data(comet_home, genesis_path).await?;
130        }
131
132        Ok(())
133    }
134}
135
136/// Compress single directory to gzipped tar archive. Accepts an Option for naming
137/// the subdir within the tar archive, which defaults to ".", meaning no nesting.
138pub fn archive_directory(
139    src_directory: PathBuf,
140    archive_filepath: PathBuf,
141    subdir_within_archive: Option<String>,
142) -> anyhow::Result<()> {
143    // Don't clobber an existing target archive.
144    if archive_filepath.exists() {
145        tracing::error!(
146            "export archive filepath already exists: {}",
147            archive_filepath.display()
148        );
149        anyhow::bail!("refusing to overwrite existing archive");
150    }
151
152    tracing::info!(
153        "creating archive {} -> {}",
154        src_directory.display(),
155        archive_filepath.display()
156    );
157    let tarball_file = File::create(&archive_filepath)
158        .context("failed to create file for archive: check parent directory and permissions")?;
159    let enc = GzEncoder::new(tarball_file, Compression::default());
160    let mut tarball = tar::Builder::new(enc);
161    let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
162    tarball
163        .append_dir_all(subdir_within_archive, src_directory.as_path())
164        .context("failed to package archive contents")?;
165    Ok(())
166}
167
168/// Read the last block timestamp from the pd state.
169pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
170    let rocksdb = home.join("rocksdb");
171    let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
172        .await
173        .context("error loading store for timestamp")?;
174    let state = storage.latest_snapshot();
175    let last_block_time = state
176        .get_current_block_timestamp()
177        .await
178        .context("error reading latest block timestamp")?;
179    storage.release().await;
180    Ok(last_block_time)
181}
182
183#[instrument(skip_all)]
184pub async fn migrate_comet_data(
185    comet_home: PathBuf,
186    new_genesis_file: PathBuf,
187) -> anyhow::Result<()> {
188    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");
189
190    // Read the contents of new_genesis_file into a serde_json::Value and pull out .initial_height
191    let genesis_contents =
192        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
193    let genesis_json: serde_json::Value =
194        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
195    tracing::info!(?genesis_json, "parsed genesis file");
196    let initial_height = genesis_json["initial_height"]
197        .as_str()
198        .context("error reading initial_height from genesis file")?
199        .parse::<u64>()?;
200
201    // Write the genesis data to HOME/config/genesis.json
202    let genesis_file = comet_home.join("config").join("genesis.json");
203    tracing::info!(?genesis_file, "writing genesis file to comet config");
204    std::fs::write(genesis_file, genesis_contents)
205        .context("error writing genesis file to comet config")?;
206
207    // Adjust the high-water mark in priv_validator_state.json but don't decrease it
208    adjust_priv_validator_state(&comet_home, initial_height)?;
209
210    // Delete other cometbft data.
211    clear_comet_data(&comet_home)?;
212
213    Ok(())
214}
215
216#[instrument(skip_all)]
217fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
218    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
219    let current_state: serde_json::Value =
220        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;
221
222    let current_height = current_state["height"]
223        .as_str()
224        .context("error reading height from priv_validator_state.json")?
225        .parse::<u64>()?;
226    if current_height < initial_height {
227        tracing::info!(
228            "increasing height in priv_validator_state from {} to {}",
229            current_height,
230            initial_height
231        );
232        let new_state = serde_json::json!({
233            "height": initial_height.to_string(), // Important to use to_string here as if protojson
234            "round": 0,
235            "step": 0,
236        });
237        tracing::info!(?new_state, "updated priv_validator_state.json");
238        std::fs::write(
239            &priv_validator_state,
240            &serde_json::to_string_pretty(&new_state)?,
241        )?;
242    } else {
243        anyhow::bail!(
244            "priv_validator_state height {} is already greater than or equal to initial_height {}",
245            current_height,
246            initial_height
247        );
248    }
249
250    Ok(())
251}
252
253#[instrument(skip_all)]
254fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
255    let data_dir = comet_home.join("data");
256
257    /*
258    N.B. We want to preserve the `tx_index.db` directory.
259    Doing so will allow CometBFT to reference historical transactions behind the upgrade boundary.
260     */
261    for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
262        let path = data_dir.join(subdir);
263        if path.exists() {
264            tracing::info!(?path, "removing file");
265            std::fs::remove_dir_all(path)?;
266        }
267    }
268
269    Ok(())
270}