// pd/migrate.rs

1//! Logic for handling chain upgrades.
2//!
3//! When consensus-breaking changes are made to the Penumbra software,
4//! node operators must coordinate to perform a chain upgrade.
5//! This module declares how local `pd` state should be altered, if at all,
6//! in order to be compatible with the network post-chain-upgrade.
7mod mainnet1;
8mod mainnet2;
9mod reset_halt_bit;
10mod simple;
11mod testnet72;
12mod testnet74;
13mod testnet76;
14mod testnet77;
15mod testnet78;
16
17use anyhow::{ensure, Context};
18use penumbra_sdk_governance::StateReadExt;
19use penumbra_sdk_sct::component::clock::EpochRead;
20use std::path::{Path, PathBuf};
21use tracing::instrument;
22
23use cnidarium::Storage;
24use penumbra_sdk_app::SUBSTORE_PREFIXES;
25
26use flate2::write::GzEncoder;
27use flate2::Compression;
28use std::fs::File;
29
/// The kind of migration that should be performed.
#[derive(Debug)]
pub enum Migration {
    /// Set the chain's halt bit to `false`.
    ReadyToStart,
    /// A simple migration: adds a key to the consensus state.
    /// This is useful for testing upgrade mechanisms, including in production.
    SimpleMigration,
    /// Testnet-72 migration:
    /// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index.
    Testnet72,
    /// Testnet-74 migration:
    /// - Update the base liquidity index to order routable pairs by descending liquidity
    /// - Update arb executions to include the amount of filled input in the output
    /// - Add `AuctionParameters` to the consensus state
    Testnet74,
    /// Testnet-76 migration:
    /// - Heal the auction component's VCB tally.
    /// - Update FMD parameters to new protobuf structure.
    Testnet76,
    /// Testnet-77 migration:
    /// - Reset the halt bit
    Testnet77,
    /// Testnet-78 migration:
    /// - Truncate various user-supplied `String` fields to a maximum length.
    /// - Populate the DEX NV price indexes with position data
    Testnet78,
    /// Mainnet-1 migration:
    /// - Restore IBC packet commitments for improperly handled withdrawal attempts
    Mainnet1,
    /// Mainnet-2 migration:
    /// - no-op
    Mainnet2,
}
64
65impl Migration {
66    #[instrument(skip(pd_home, genesis_start, force))]
67    pub async fn migrate(
68        &self,
69        pd_home: PathBuf,
70        comet_home: Option<PathBuf>,
71        genesis_start: Option<tendermint::time::Time>,
72        force: bool,
73    ) -> anyhow::Result<()> {
74        tracing::debug!(
75            ?pd_home,
76            ?genesis_start,
77            ?force,
78            "preparing to run migration!"
79        );
80        let rocksdb_dir = pd_home.join("rocksdb");
81        let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
82        ensure!(
83            storage.latest_snapshot().is_chain_halted().await || force,
84            "to run a migration, the chain halt bit must be set to `true` or use the `--force` cli flag"
85        );
86
87        // Assert that the local chain state version is not corrupted, see `v0.80.10` release notes.
88        let latest_version = storage.latest_version();
89        let block_height = storage.latest_snapshot().get_block_height().await?;
90        ensure!(
91            latest_version == block_height || force,
92            "local chain state version is corrupted: {} != {}",
93            latest_version,
94            block_height
95        );
96
97        tracing::info!("started migration");
98
99        // If this is `ReadyToStart`, we need to reset the halt bit and return early.
100        if let Migration::ReadyToStart = self {
101            reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
102            return Ok(());
103        }
104
105        match self {
106            Migration::SimpleMigration => {
107                simple::migrate(storage, pd_home.clone(), genesis_start).await?
108            }
109            Migration::Mainnet1 => {
110                mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
111            }
112            Migration::Mainnet2 => {
113                mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
114            }
115            // We keep historical migrations around for now, this will help inform an abstracted
116            // design. Feel free to remove it if it's causing you trouble.
117            _ => unimplemented!("the specified migration is unimplemented"),
118        }
119
120        if let Some(comet_home) = comet_home {
121            let genesis_path = pd_home.join("genesis.json");
122            migrate_comet_data(comet_home, genesis_path).await?;
123        }
124
125        Ok(())
126    }
127}
128
129/// Compress single directory to gzipped tar archive. Accepts an Option for naming
130/// the subdir within the tar archive, which defaults to ".", meaning no nesting.
131pub fn archive_directory(
132    src_directory: PathBuf,
133    archive_filepath: PathBuf,
134    subdir_within_archive: Option<String>,
135) -> anyhow::Result<()> {
136    // Don't clobber an existing target archive.
137    if archive_filepath.exists() {
138        tracing::error!(
139            "export archive filepath already exists: {}",
140            archive_filepath.display()
141        );
142        anyhow::bail!("refusing to overwrite existing archive");
143    }
144
145    tracing::info!(
146        "creating archive {} -> {}",
147        src_directory.display(),
148        archive_filepath.display()
149    );
150    let tarball_file = File::create(&archive_filepath)
151        .context("failed to create file for archive: check parent directory and permissions")?;
152    let enc = GzEncoder::new(tarball_file, Compression::default());
153    let mut tarball = tar::Builder::new(enc);
154    let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
155    tarball
156        .append_dir_all(subdir_within_archive, src_directory.as_path())
157        .context("failed to package archive contents")?;
158    Ok(())
159}
160
161/// Read the last block timestamp from the pd state.
162pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
163    let rocksdb = home.join("rocksdb");
164    let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
165        .await
166        .context("error loading store for timestamp")?;
167    let state = storage.latest_snapshot();
168    let last_block_time = state
169        .get_current_block_timestamp()
170        .await
171        .context("error reading latest block timestamp")?;
172    storage.release().await;
173    Ok(last_block_time)
174}
175
176#[instrument(skip_all)]
177pub async fn migrate_comet_data(
178    comet_home: PathBuf,
179    new_genesis_file: PathBuf,
180) -> anyhow::Result<()> {
181    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");
182
183    // Read the contents of new_genesis_file into a serde_json::Value and pull out .initial_height
184    let genesis_contents =
185        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
186    let genesis_json: serde_json::Value =
187        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
188    tracing::info!(?genesis_json, "parsed genesis file");
189    let initial_height = genesis_json["initial_height"]
190        .as_str()
191        .context("error reading initial_height from genesis file")?
192        .parse::<u64>()?;
193
194    // Write the genesis data to HOME/config/genesis.json
195    let genesis_file = comet_home.join("config").join("genesis.json");
196    tracing::info!(?genesis_file, "writing genesis file to comet config");
197    std::fs::write(genesis_file, genesis_contents)
198        .context("error writing genesis file to comet config")?;
199
200    // Adjust the high-water mark in priv_validator_state.json but don't decrease it
201    adjust_priv_validator_state(&comet_home, initial_height)?;
202
203    // Delete other cometbft data.
204    clear_comet_data(&comet_home)?;
205
206    Ok(())
207}
208
/// Raise the signing high-water mark in `priv_validator_state.json` to
/// `initial_height`, resetting `round` and `step` to zero.
///
/// Errors if the file is missing/unparseable, or if the recorded height is
/// already `>= initial_height`.
///
/// NOTE(review): the caller's comment says "don't decrease it", but this
/// function bails (fails the migration) rather than leaving the file
/// untouched when the height is already at or past `initial_height` —
/// confirm that aborting is the intended behavior in that case.
#[instrument(skip_all)]
fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
    let current_state: serde_json::Value =
        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;

    // The height field is stored protojson-style as a decimal string.
    let current_height = current_state["height"]
        .as_str()
        .context("error reading height from priv_validator_state.json")?
        .parse::<u64>()?;
    if current_height < initial_height {
        tracing::info!(
            "increasing height in priv_validator_state from {} to {}",
            current_height,
            initial_height
        );
        // Write a fresh state object; any signature fields from the old
        // state are intentionally dropped along with round/step.
        let new_state = serde_json::json!({
            "height": initial_height.to_string(), // Important to use to_string here as if protojson
            "round": 0,
            "step": 0,
        });
        tracing::info!(?new_state, "updated priv_validator_state.json");
        std::fs::write(
            &priv_validator_state,
            &serde_json::to_string_pretty(&new_state)?,
        )?;
    } else {
        anyhow::bail!(
            "priv_validator_state height {} is already greater than or equal to initial_height {}",
            current_height,
            initial_height
        );
    }

    Ok(())
}
245
246#[instrument(skip_all)]
247fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
248    let data_dir = comet_home.join("data");
249
250    /*
251    N.B. We want to preserve the `tx_index.db` directory.
252    Doing so will allow CometBFT to reference historical transactions behind the upgrade boundary.
253     */
254    for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
255        let path = data_dir.join(subdir);
256        if path.exists() {
257            tracing::info!(?path, "removing file");
258            std::fs::remove_dir_all(path)?;
259        }
260    }
261
262    Ok(())
263}