How can I append this
["a", "bb", "c", "dd", "e", "ff", "g", "hh"] //type is Vec<String>
to this
fn list_codes() -> BiMap<char, &'static str>
{
let letters = BiMap::<char, &str>::new();
[('a',"cl01"),
('b',"cl02"),
('c',"cl03"),
('d',"cl04")]
.into_iter()
.collect()
}
That should be like this
fn list_codes() -> BiMap<char, &'static str>
{
[('a',"cl01"),
('b',"cl02"),
('c',"cl03"),
('d',"cl04"),
('a',"bb"),
('c',"dd"),
('e',"ff"),
('g',"hh")]
.into_iter()
.collect()
}
Actually the logic is simple. It will take a list from a CSV file and import it into a BiMap. After the BiMap has been populated, the program will use it to encode/decode texts.
EXTRAS:
The rest of code is here:
//-------------------------//
#![allow(non_snake_case)]
//-------------------------//
#![allow(dead_code)]
//-------------------------//
#![allow(unused_variables)]
//-------------------------//
#![allow(unused_imports)]
//-------------------------//
// #![allow(inactive_code)]
//-------------------------//
use std::error::Error as stdErr;
use std::io::Error as ioErr;
use std::net::ToSocketAddrs;
use std::process;
use std::env;
use std::fs;
use std::array;
use std::io;
use std::slice::SliceIndex;
use csv::Error as csvErr;
use csv::Reader;
use csv::StringRecord;
use bimap::BiMap;
use directories::ProjectDirs;
use serde::Deserialize;
//#[cfg(doc)] #[doc = include_str!("../Changelog.md")] pub mod _changelog{}
/// Application settings deserialized from `settings.toml`.
#[derive(Deserialize, Debug)]
struct Config {
// Separator placed between codes in encoded output. The "seperator" spelling
// matches the key in settings.toml — renaming it would break deserialization.
seperator: String,
// Path to the CSV file holding the letter/code table.
list_file_path: String,
}
/// One row of the letter/code CSV (`letter,code`).
/// NOTE(review): currently unused (covered by #![allow(dead_code)]); the CSV
/// is read as raw StringRecords instead of being deserialized into this type.
#[derive(Debug, Deserialize)]
struct Record <'a> {
letter: char,
code: &'a str,
}
/// Locate the platform-specific config directory for `test-config-parser`
/// and read `settings.toml` from it.
///
/// Returns the file contents, or an I/O error when the file cannot be read
/// or no project directory could be determined for this platform.
fn find_and_read_config_file() -> Result<String, ioErr>
{
    match ProjectDirs::from("dev", "rusty-bois", "test-config-parser") {
        Some(proj_dirs) => fs::read_to_string(proj_dirs.config_dir().join("settings.toml")),
        None => Err(ioErr::new(io::ErrorKind::NotFound, "no")),
    }
}
/// Load the configuration, falling back to built-in defaults when the
/// settings file cannot be found/read.
///
/// Returns "<list_file_path>\n<seperator>" as a single string.
/// Panics (unwrap) if the file exists but is not valid TOML.
fn parse_toml() -> String
{
    let config: Config = find_and_read_config_file()
        .map(|file| toml::from_str(&file).unwrap())
        .unwrap_or_else(|_| Config {
            seperator: "x".to_string(),
            list_file_path: "rdl".to_string(),
        });
    format!("{}\n{}", config.list_file_path, config.seperator)
}
/// Split the "<path>\n<separator>" string from `parse_toml` into a Vec:
/// index 0 = CSV path, index 1 = separator.
fn reformat_parse_toml() -> Vec<String>
{
    // map/collect replaces the original loop that tracked an insert index
    // by hand (`vec_values.insert(i, ...); i += 1`).
    parse_toml()
        .split('\n')
        .map(str::to_string)
        .collect()
}
/// Read every record from the CSV file named by the first config value,
/// propagating any csv error.
fn read_and_parse_csv() -> Result<Vec<StringRecord>, csvErr>
{
    let config_vars = reformat_parse_toml();
    let mut reader = Reader::from_path(&config_vars[0])?;
    reader.records().collect()
}
/// Flatten every CSV field into one Vec<String>, row by row.
///
/// A failed read yields an empty Vec — errors are silently ignored, which
/// matches the original best-effort behavior (iterating the Result directly).
fn reformat_read_and_parse_csv() -> Vec<String>
{
    let mut fields = Vec::new();
    if let Ok(records) = read_and_parse_csv() {
        for record in &records {
            // StringRecord iterates its fields as &str; extend replaces the
            // original triple-nested loop with a hand-maintained index.
            fields.extend(record.iter().map(str::to_string));
        }
    }
    fields
}
/// Read one line from stdin.
///
/// On success returns the line (including the trailing newline); on failure
/// the error's message is returned as the string instead.
fn input() -> String
{
    let mut buffer = String::new();
    match io::stdin().read_line(&mut buffer) {
        Ok(_) => buffer,
        Err(e) => e.to_string(),
    }
}
fn list_codes() -> BiMap<char, &'static str>
{
let letters = BiMap::<char, &str>::new();
[('a',"cl01"),
('b',"cl02"),
('c',"cl03"),
('d',"cl04")]
.into_iter()
.collect()
}
/// Interactively encode ("e") or decode ("d") one line read from stdin.
///
/// Encoding maps every character through `list_codes`, joining the codes with
/// the configured separator. Decoding splits on the separator and maps each
/// code back to its character, prefixing every character with a space
/// (NOTE(review): the leading/interior spaces reproduce the original output;
/// `coder_from_file` decodes without them — confirm which is intended).
///
/// Panics via unwrap() when a character/code is absent from the table.
fn coder(flag: &str) -> String
{
    let letters = list_codes();
    let config_vars = reformat_parse_toml();
    let split_char = &config_vars[1];
    if flag == "e"
    {
        println!("Enter the text that you want to encrypt:");
        let ipt = input().trim_end().to_string();
        // Collect each character's code, then join once with the separator.
        // This replaces the O(n^2) `format!` accumulation and also fixes the
        // original `&result_raw[1..]`, which removed only one *byte* of a
        // possibly multi-byte separator.
        let codes: Vec<&str> = ipt
            .chars()
            .map(|letter| *letters.get_by_left(&letter).unwrap())
            .collect();
        codes.join(split_char.as_str())
    }
    else if flag == "d"
    {
        println!("Enter the text that you want to decrypt:");
        let ipt = input().trim_end().to_string();
        let mut decoded = String::new();
        // push/push_str instead of reallocating with format! per iteration.
        for code in ipt.split(split_char.as_str())
        {
            decoded.push(' ');
            decoded.push(*letters.get_by_right(code).unwrap());
        }
        decoded
    }
    else
    {
        "Error while decode/encode the input".to_string()
    }
}
/// Encode ("ef") or decode ("df") the entire contents of the file at `path`.
///
/// Panics if the file cannot be read, and via unwrap() when a character/code
/// is missing from the table (e.g. a newline on the encode path).
fn coder_from_file(flag: &str, path: &str) -> String
{
    let letters = list_codes();
    let contents = fs::read_to_string(path)
        .expect("Something went wrong reading the file");
    let config_vars = reformat_parse_toml();
    let split_char = &config_vars[1];
    if flag == "ef"
    {
        // Join the per-character codes once instead of re-allocating with
        // format! per character; also fixes the original `&result_raw[1..]`
        // one-byte slice for multi-byte separators.
        let codes: Vec<&str> = contents
            .chars()
            .map(|letter| *letters.get_by_left(&letter).unwrap())
            .collect();
        codes.join(split_char.as_str())
    }
    else if flag == "df"
    {
        let mut decoded = String::new();
        // Newlines are represented by the sentinel "xpl01" in encoded files.
        for code in contents.replace("\n", "xpl01").split(split_char.as_str())
        {
            decoded.push(*letters.get_by_right(code).unwrap());
        }
        decoded
    }
    else
    {
        "Error while decode/encode the input".to_string()
    }
}
/// Dispatch on the first CLI argument:
/// -d / -e   : interactive decode / encode from stdin
/// -df / -ef : decode / encode the file named by the second argument
///
/// Panics if the required argument(s) are missing (direct indexing into args).
fn coder_options() -> String
{
    let args: Vec<String> = env::args().collect();
    let mode = &args[1];
    // The original also fetched the config separator here but never used it;
    // the needless `&"d"`-style double references are gone as well.
    match mode.as_str()
    {
        "-d" => coder("d"),
        "-e" => coder("e"),
        "-ef" => coder_from_file("ef", &args[2]),
        "-df" => coder_from_file("df", &args[2]),
        _ =>
        {
            println!("You picked wrong flag. Please select a vaild one.");
            String::new()
        }
    }
}
/// Entry point: run the CLI dispatcher and print whatever it produced.
fn main()
{
    println!("{}", coder_options());
}
Here is Cargo.toml
[package]
name = "Coder"
version = "0.1.0"
edition = "2021"
authors = ["Huso112"]
license= "GPL-3.0"
description = "This program encrypt your datas with special codes."
repository=""
[[bin]]
path = "src/Coder.rs"
name = "Coder"
[dependencies]
bimap = "~0.6.1"
directories = "~4.0.1"
serde = {version="~1.0.133", features=["derive"]}
toml = "~0.5.8"
csv = "~1.1.6"
Here is settings.toml file
seperator = "z"
list_file_path = "/home/hoovy/.config/test-config-parser/lang-table.csv"
Here is lang-table.csv file
letter,code
a,bb
c,dd
e,ff
g,hh
Boring way:
let mut i = 0;
while i < vec_values.len() - 1 {
let key_str = &vec_values[i];
let value = &vec_values[i + 1].clone();
let key = match key_str.chars().nth(0) {
Some(key) => key,
None => {
println!("error: empty key");
break;
}
};
letters.insert(key, value);
i += 2;
}
A version without clone() if you don't need vec_values after this, and can tear it apart:
if vec_values.len() % 2 != 0 {
println!("error: missing a last value");
return;
}
while !vec_values.is_empty() {
// Note: ok to unwrap, because we are sure that vec_values has an even number of values
let value = vec_values.pop().unwrap();
let key_str = vec_values.pop().unwrap();
let key = match key_str.chars().nth(0) {
Some(key) => key,
None => {
println!("error: empty key");
break;
}
};
letters.insert(key, value);
}
There's also a "smart" way using Iterator methods, but I'll refrain from advising it here.
Related
I have a Rust function that searches through a large JSON file (about 1,080,000 lines). It currently takes about 1 second to search through the file; the data in the file is mostly entries like this:
{"although":false,"radio":2056538449,"hide":1713884795,"hello":1222349560.787047,"brain":903780409.0046091,"heard":-1165604870.8374772}
How would I be able to increase the performance of this function?
Here is my Main.rs file.
use std::collections::VecDeque;
use std::fs::File;
use std::io::BufWriter;
use std::io::{BufRead, BufReader, Write};
/// Scan `filename` line by line for a line equal (after trim) to
/// `search_line`, returning the 1-based line numbers of matches.
///
/// NOTE(review): the loop breaks after the first match, so at most one line
/// number is ever returned even though a collection is used — confirm whether
/// all matches were intended.
fn search(filename: &str, search_line: &str) -> Result<VecDeque<u32>, std::io::Error> {
    let file = File::open(filename)?;
    let mut reader = BufReader::with_capacity(2048 * 2048, file);
    let mut line_numbers = VecDeque::new();
    let mut line_number = 0;
    // Reuse one String buffer across iterations instead of allocating a fresh
    // String per line — the dominant avoidable cost in this loop.
    let mut line = String::new();
    let start = std::time::Instant::now();
    loop {
        line_number += 1;
        line.clear();
        let n = reader.read_line(&mut line)?;
        if n == 0 {
            break;
        }
        if line.trim() == search_line {
            line_numbers.push_back(line_number);
            println!(
                "Matching line found on line number {}: {}",
                line_number, line
            );
            break;
        }
    }
    let elapsed = start.elapsed();
    println!("Elapsed time: {:?}", elapsed);
    if line_numbers.is_empty() {
        println!("No lines found that match the given criteria");
    }
    Ok(line_numbers)
}
/// Run the search against the hard-coded database file, reporting any I/O error.
fn main() {
    let database = "Test.json";
    match search(database, r#"{"08934":420696969}"#) {
        Ok(_) => {}
        Err(e) => println!("Error reading file: {}", e),
    }
}
I am trying to use the polars rust library to create dataframes from json fetched from stats.nba.com, (example json). The best example I could find for creating a dataframe from json was from the docs but I'm not sure how to load a serde_json::Value into a Cursor and pass it into the JsonReader. Below is my code to load everything into Vecs and then create the Series and DataFrame, but is there a better way?:
// Fetch the endpoint JSON and print one polars DataFrame per "resultSets"
// entry. Columns are typed from the first non-null value of each column;
// a column whose first value is null is silently dropped.
// NOTE(review): iterating a HashMap gives arbitrary order, so the column
// order of each DataFrame is nondeterministic — confirm whether that matters.
fn load_dataframe(&self) -> Result<()> {
let endpoint_json = self.send_request().unwrap();
let result_sets = endpoint_json["resultSets"].as_array().unwrap();
for data_set in result_sets {
// "rowSet" is the data, "headers" the column names, per the NBA stats schema.
let data_set_values = data_set["rowSet"].as_array().unwrap();
let data_set_headers = data_set["headers"].as_array().unwrap();
let mut headers_to_values: HashMap<&str, Vec<&Value>> = HashMap::new();
// First row seeds the map; subsequent rows append to each column's Vec.
for (pos, row) in data_set_values.iter().enumerate() {
if pos == 0 {
init_columns(&mut headers_to_values, row, data_set_headers);
} else {
insert_row_values(&mut headers_to_values, row, data_set_headers);
}
}
let mut df_series: Vec<Series> = Vec::new();
for (col_name, json_values) in headers_to_values {
if json_values.is_empty() { continue; }
// The first value decides the whole column's dtype: i64, f64, or string.
// Later values of a different type fall back to 0 / 0.0 / "".
let first_val = json_values[0];
if first_val.is_null() { continue; }
if first_val.is_i64() {
let typed_data = json_values.iter().map(|&v| v.as_i64().unwrap_or(0)).collect::<Vec<i64>>();
df_series.push(Series::new(col_name, typed_data));
} else if first_val.is_f64() {
let typed_data = json_values.iter().map(|&v| v.as_f64().unwrap_or(0.0)).collect::<Vec<f64>>();
df_series.push(Series::new(col_name, typed_data));
} else {
let typed_data = json_values.iter().map(|&v| v.as_str().unwrap_or("")).collect::<Vec<&str>>();
df_series.push(Series::new(col_name, typed_data));
}
}
let data_set_name = data_set["name"].as_str().unwrap();
let df = DataFrame::new(df_series)?;
println!("{} \n{:?}", data_set_name, df);
}
Ok(())
}
/// Seed the column map from the first row: one entry per header, each
/// starting with that row's cell. Panics (unwrap/index) if the row is not an
/// array, a header is not a string, or the row is longer than the headers.
fn init_columns<'a>(headers_to_values: &mut HashMap<&'a str, Vec<&'a Value>>, first_row: &'a Value, headers: &'a Vec<Value>) -> () {
    for (pos, cell) in first_row.as_array().unwrap().iter().enumerate() {
        headers_to_values.insert(headers[pos].as_str().unwrap(), vec![cell]);
    }
}
/// Append one row's cells onto the per-column lists built by `init_columns`.
/// Panics if a column name is missing from the map.
fn insert_row_values<'a>(headers_to_values: &mut HashMap<&'a str, Vec<&'a Value>>, row: &'a Value, headers: &'a Vec<Value>) -> () {
    for (pos, cell) in row.as_array().unwrap().iter().enumerate() {
        let column = headers[pos].as_str().unwrap();
        headers_to_values.get_mut(column).unwrap().push(cell);
    }
}
I'm trying to use csv and serde to read a mixed-delimiter csv-type file in rust, but I'm having a hard time seeing how to use these libraries to accomplish it. Each line looks roughly like:
value1|value2|subvalue1,subvalue2,subvalue3|value4
and would de-serialize to a struct that looks like:
struct Line {
value1:u64,
value2:u64,
value3:Vec<u64>,
value4:u64,
}
Any guidance on how to tell the library that there are two different delimiters and that one of the columns has this nested structure?
Ok, I'm still a beginner in Rust so I can't guarantee that this is good at all- I suspect it could be done more efficiently, but I do have a solution that works-
use csv::{ReaderBuilder};
use serde::{Deserialize, Deserializer};
use serde::de::Error;
use std::error::Error as StdError;
/// Wrapper for the comma-separated sub-list inside one '|'-delimited field.
#[derive(Debug, Deserialize)]
pub struct ListType {
values: Vec<u8>,
}
/// Custom serde deserializer: treat a single field's text as its own
/// comma-delimited, headerless CSV record and parse it into a ListType.
fn deserialize_list<'de, D>(deserializer: D) -> Result<ListType , D::Error>
where D: Deserializer<'de> {
    let buf: &str = Deserialize::deserialize(deserializer)?;
    let mut rdr = ReaderBuilder::new()
        .delimiter(b',')
        .has_headers(false)
        .from_reader(buf.as_bytes());
    // At most one record exists; its absence is reported as a custom error.
    match rdr.deserialize().next() {
        Some(result) => result.map_err(D::Error::custom),
        None => Err("error").map_err(D::Error::custom),
    }
}
struct Line {
value1:u64,
value2:u64,
#[serde(deserialize_with = "deserialize_list")]
value3:ListType,
value4:u64,
}
/// Parse a single '|'-delimited line into a `Line`.
///
/// Bug fix: the original ended with `Ok(Line)`, returning the *type name*
/// (a compile error) instead of the deserialized `record`.
fn read_line(line: &str) -> Result<Line, Box<dyn StdError>> {
    let mut rdr = ReaderBuilder::new()
        .delimiter(b'|')
        .from_reader(line.as_bytes());
    let mut iter = rdr.deserialize();
    if let Some(result) = iter.next() {
        let record: Line = result?;
        Ok(record)
    } else {
        Err(From::from("error"))
    }
}
[EDIT]
I found that the above solution was intolerably slow, but I was able to make performance acceptable by simply manually deserializing the nested type into a fixed size array by-
/// Fixed-size variant of the sub-list: up to 8 optional values, avoiding a
/// per-field Vec allocation (the author's faster rewrite).
#[derive(Debug, Deserialize)]
pub struct ListType {
values: [Option<u8>; 8],
}
fn deserialize_farray<'de, D>(deserializer: D) -> Result<ListType, D::Error>
where
D: Deserializer<'de>,
{
let buf: &str = Deserialize::deserialize(deserializer)?;
let mut split = buf.split(",");
let mut dest: CondList = CondList {
values: [None; 8],
};
let mut ind: usize = 0;
for tok in split {
if tok == "" {
break;
}
match tok.parse::<u8>() {
Ok(val) => {
dest.values[ind] = Some(val);
}
Err(e) => {
return Err(e).map_err(D::Error::custom);
}
}
ind += 1;
}
return Ok(dest);
}
I'm making an application with Swift 3.0, but I have a problem: the service is not yet implemented in the REST API, so I'm creating a simulated JSON response to keep working. The problem, as you will see at the end of the explanation in the image, is that I do not know how to declare the JSON. Basically the program will make a call to the server, which will respond with a JSON (for now I give it the simulated one — you will see it in the code). That JSON is then mapped with ObjectMapper onto some models (whose code I include) so that in the end the application has an object.
Error declaring Simulated JSON
These are the three models I have to map the JSON when it will come from the server or in this case, the simulated JSON.
The first is "LegendEntriesModel":
import Foundation
import ObjectMapper
import AlamofireDomain
/// Maps the top-level "legendEntries" JSON object: a snapshot id, a date,
/// and a list of deliverables.
class LegendEntriesModel: Mappable {
    fileprivate var _id_snapshot: String?
    fileprivate var _date: String?
    fileprivate var _deliverables: [DeliverablesModel]?
    init(){}
    required init?(map: Map) { }
    func mapping(map: Map) {
        self.id_snapshot <- map["id_snapshot"]
        self.date <- map["date"]
        self.deliverables <- map["deliverables"]
    }
    // Bug fix: the String getters compared the optional against "" instead of
    // nil, so reading a property that was never set force-unwrapped nil and
    // crashed. They now default the backing optional to "" when it is nil,
    // matching the nil-check pattern already used by `deliverables`.
    var id_snapshot: String {
        get {
            if _id_snapshot == nil {
                _id_snapshot = ""
            }
            return _id_snapshot!
        }
        set {
            _id_snapshot = newValue
        }
    }
    var date: String {
        get {
            if _date == nil {
                _date = ""
            }
            return _date!
        }
        set {
            _date = newValue
        }
    }
    var deliverables: [DeliverablesModel] {
        get {
            if _deliverables == nil {
                _deliverables = []
            }
            return _deliverables!
        }
        set {
            _deliverables = newValue
        }
    }
    //MARK: RELEASE MEMORY BETWEEN OBJECT AND API REST (BROKE DEPENDENCIS)
    /// Deep copy so the retained model is decoupled from the API response.
    func copy()->LegendEntriesModel {
        let legendEntriesModel = LegendEntriesModel()
        legendEntriesModel.id_snapshot = self.id_snapshot
        legendEntriesModel.date = self.date
        legendEntriesModel.deliverables = copyDeliverables()
        return legendEntriesModel
    }
    func copyDeliverables() -> [DeliverablesModel]{
        var newArray: [DeliverablesModel] = []
        for item in deliverables {
            newArray.append(item.copy())
        }
        return newArray
    }
}
The second on is "DeliverablesModel"
import Foundation
import ObjectMapper
import AlamofireDomain
/// Maps one "deliveries" entry: id, type, layer URL and display options.
class DeliverablesModel: Mappable {
    fileprivate var _id: String?
    fileprivate var _type: String?
    fileprivate var _url_layer: String?
    fileprivate var _options: OptionsDeliverablesModel?
    init(){}
    required init?(map: Map) { }
    func mapping(map: Map) {
        self.id <- map["id"]
        self.type <- map["type"]
        self.url_layer <- map["url_layer"]
        self.options <- map["options"]
    }
    // Bug fix: the String getters compared the optional against "" instead of
    // nil, so reading an unset property force-unwrapped nil and crashed.
    // They now default to "" when the backing optional is nil, matching the
    // nil-check pattern already used by `options`.
    var id: String {
        get {
            if _id == nil {
                _id = ""
            }
            return _id!
        }
        set {
            _id = newValue
        }
    }
    var type: String {
        get {
            if _type == nil {
                _type = ""
            }
            return _type!
        }
        set {
            _type = newValue
        }
    }
    var url_layer: String {
        get {
            if _url_layer == nil {
                _url_layer = ""
            }
            return _url_layer!
        }
        set {
            _url_layer = newValue
        }
    }
    var options: OptionsDeliverablesModel {
        get {
            if _options == nil {
                _options = OptionsDeliverablesModel()
            }
            return _options!
        }
        set {
            _options = newValue
        }
    }
    //MARK: RELEASE MEMORY BETWEEN OBJECT AND API REST (BROKE DEPENDENCIS)
    /// Copy decoupling the model from the API response.
    /// NOTE(review): `options` is assigned by reference here (no
    /// `options.copy()`), unlike the deep copy in LegendEntriesModel —
    /// confirm whether that is intended.
    func copy()->DeliverablesModel {
        let deliverablesModel = DeliverablesModel()
        deliverablesModel.id = self.id
        deliverablesModel.type = self.type
        deliverablesModel.url_layer = self.url_layer
        deliverablesModel.options = self.options
        return deliverablesModel
    }
}
And the last one is "OptionsDeliverablesModel":
import Foundation
import ObjectMapper
import AlamofireDomain
/// Maps one "options" entry: a range-slider configuration for a deliverable.
class OptionsDeliverablesModel: Mappable {
    fileprivate var _type: String?
    fileprivate var _max_range: Float?
    fileprivate var _min_range: Float?
    fileprivate var _title: String?
    fileprivate var _initial_max_value: Float?
    fileprivate var _initial_min_value: Float?
    fileprivate var _id: String?
    init(){}
    required init?(map: Map) { }
    func mapping(map: Map) {
        self.type <- map["type"]
        self.max_range <- map["max_range"]
        self.min_range <- map["min_range"]
        self.title <- map["title"]
        self.initial_max_value <- map["initial_max_value"]
        self.initial_min_value <- map["initial_min_value"]
        self.id <- map["id"]
    }
    // Bug fix: every getter compared the optional against a default value
    // ("" for Strings, 0 for Floats) instead of nil, so reading a property
    // that was never set force-unwrapped nil and crashed. Each getter now
    // checks for nil and installs the default before unwrapping.
    var type: String {
        get {
            if _type == nil {
                _type = ""
            }
            return _type!
        }
        set {
            _type = newValue
        }
    }
    var max_range: Float {
        get {
            if _max_range == nil {
                _max_range = 0
            }
            return _max_range!
        }
        set {
            _max_range = newValue
        }
    }
    var min_range: Float {
        get {
            if _min_range == nil {
                _min_range = 0
            }
            return _min_range!
        }
        set {
            _min_range = newValue
        }
    }
    var title: String {
        get {
            if _title == nil {
                _title = ""
            }
            return _title!
        }
        set {
            _title = newValue
        }
    }
    var initial_max_value: Float {
        get {
            if _initial_max_value == nil {
                _initial_max_value = 0
            }
            return _initial_max_value!
        }
        set {
            _initial_max_value = newValue
        }
    }
    var initial_min_value: Float {
        get {
            if _initial_min_value == nil {
                _initial_min_value = 0
            }
            return _initial_min_value!
        }
        set {
            _initial_min_value = newValue
        }
    }
    var id: String {
        get {
            if _id == nil {
                _id = ""
            }
            return _id!
        }
        set {
            _id = newValue
        }
    }
    //MARK: RELEASE MEMORY BETWEEN OBJECT AND API REST (BROKE DEPENDENCIS)
    /// Field-by-field copy decoupling the model from the API response.
    func copy()->OptionsDeliverablesModel {
        let optionsDeliverablesModel = OptionsDeliverablesModel()
        optionsDeliverablesModel.type = self.type
        optionsDeliverablesModel.max_range = self.max_range
        optionsDeliverablesModel.min_range = self.min_range
        optionsDeliverablesModel.title = self.title
        optionsDeliverablesModel.initial_max_value = self.initial_max_value
        optionsDeliverablesModel.initial_min_value = self.initial_min_value
        optionsDeliverablesModel.id = self.id
        return optionsDeliverablesModel
    }
}
These three models are what I use to map the JSON inside the DAO class, but here is the problem: I do not know how to pass in the JSON that I have simulated.
The code is as follows:
import AlamofireDomain
import Alamofire
import ObjectMapper
class DeliverablesLegendDAO : SimpleDAO {
    /// Mock payload used while the real endpoint is unimplemented.
    /// Bug fix: the original assigned a JSON-style literal (with `{ }`) in a
    /// bare statement at class scope — invalid Swift on both counts. Swift
    /// dictionary/array literals use square brackets, and the value is now
    /// part of the property declaration itself.
    var deliverables: [String: Any] = [
        "legendEntries": [
            [
                "id_snapshot": "123",
                "date": "2016-10-20",
                "deliveries": [
                    [
                        "id": "12",
                        "type": "RGB",
                        "url_layer": "topp:states",
                        "options": [
                            [
                                "type": "range",
                                "max_range": 100,
                                "min_range": 0,
                                "title": "Option RGB",
                                "initial_max_value": 100,
                                "initial_min_value": 0,
                                "id": "depth"
                            ]
                        ]
                    ]
                ]
            ]
        ]
    ]
    /// POST to `parameters` and map a 200 response into LegendEntriesModel.
    /// (Also fixed: `#escaping` is not valid Swift — the attribute is
    /// `@escaping`.)
    func snapshots(_ parameters: String,
                   callbackFuncionOK: @escaping (LegendEntriesModel)->(),
                   callbackFunctionERROR: @escaping (Int,NSError)->()) {
        Alamofire.request(parameters,
                          method: .post,
                          encoding: JSONEncoding.default)
            .responseJSON { response in
                if response.result.isSuccess{
                    if let status = response.response?.statusCode {
                        switch(status){
                        case 200:
                            // NOTE(review): this maps the whole response
                            // object, not `response.result.value` — confirm
                            // ObjectMapper receives the JSON you expect.
                            let value = response
                            let legendEntries = Mapper<LegendEntriesModel>().map(JSONObject: value)
                            callbackFuncionOK(legendEntries!)
                        default:
                            break
                        }
                    }
                }
                else {
                    var statusCode = -1
                    if let _response = response.response {
                        statusCode = _response.statusCode
                    }
                    var nsError: NSError = NSError(domain: Constants.UNKNOWN_HTTP_ERROR_MSG,
                                                   code: Constants.UNKNOWN_HTTP_ERROR_ID,
                                                   userInfo: nil)
                    if let _error = response.result.error {
                        nsError = _error as NSError
                    }
                    callbackFunctionERROR(statusCode,nsError)
                }
        }
    }
}
As you can see in the image, I am declaring my simulated JSON wrong and then map it with "LegendDeliveriesModel" to an object. How can I do it?
Error declaring simulated JSON
If you need anything else, tell me. I repeat, the problem is in the JSON simulated statement that I do not know how to pass it to DAO and that it maps it.
Hi not sure if you will be open to this, but it will be better to try creating a JSON in file and load it in using Bundle like this :
/// Load a JSON file bundled with the app and return it as a dictionary.
///
/// Bug fix: the parameter is `fileName` but the body referenced `filename`
/// (an undeclared identifier — Swift names are case-sensitive), so this did
/// not compile.
/// NOTE(review): `try!`/`path!` crash if the file is missing or malformed —
/// acceptable for mock data, not for production input.
func loadJsonFrom(fileName: String) -> NSDictionary {
    let path = Bundle.main.path(forResource: fileName, ofType: "json")
    let jsonData = try! Data(contentsOf: URL(fileURLWithPath: path!))
    let jsonResult: NSDictionary = try! JSONSerialization.jsonObject(with: jsonData, options: .allowFragments) as! NSDictionary
    return jsonResult
}
I think your syntax is wrong for declaring your JSON. Pretty sure declaring Dictionaries inline in swift you only use ["key":"value"]
So just remove all of the { and }
Edit: Sorry, didn't realise it was outside of a method. If you want to do that you have to declare it directly like so
var deliverables = ["legendEntries": ["id_snapshot": "123","date": "2016-10-20","deliveries": ["id": "12","type": "RGB","url_layer":"topp:states","options": ["type": "range","max_range": 100,"min_range": 0,"title": "Option RGB","initial_max_value": 100,"initial_min_value": 0,"id": "depth"]]]]
If you're just using it as mock Data I would also consider making it a let constant rather than a variable
How could I use data in this kind of CSV file? Or how could I print for example row 2 value for "inside" column and assign it to a property / entity?
I have this kind of file I got from excel file converted to Numbers, I'd like to grab data for each column and use them.
The original CSV file opened in numbers:
The console output I got:
Using this methods:
/// Read a bundled CSV file and return its contents with line endings
/// normalized, or nil if the file is missing or unreadable.
func readDataFromCSV(fileName:String, fileType: String)-> String!{
    guard let filepath = Bundle.main.path(forResource: fileName, ofType: fileType) else {
        return nil
    }
    do {
        let raw = try String(contentsOfFile: filepath, encoding: .utf8)
        return cleanRows(file: raw)
    } catch {
        print("File Read Error for file \(filepath)")
        return nil
    }
}
/// Normalize line endings: CR -> LF, then collapse the doubled LFs produced
/// by CRLF files into single newlines.
func cleanRows(file:String)->String{
    return file
        .replacingOccurrences(of: "\r", with: "\n")
        .replacingOccurrences(of: "\n\n", with: "\n")
}
SOLUTION thanks to Jens Meder
using
/// Split CSV text into a 2-D array: rows on newline, columns on ';'.
func csv(data: String) -> [[String]] {
    return data
        .components(separatedBy: "\n")
        .map { $0.components(separatedBy: ";") }
}
in viewDidLoad
var data = readDataFromCSV(fileName: kCSVFileName, fileType: kCSVFileExtension)
data = cleanRows(file: data!)
let csvRows = csv(data: data!)
print(csvRows[1][1]) // UXM n. 166/167
What you want to do is splitting up the string in rows and then into columns (basically a two dimensional array of Strings). Swift already provides the components method for that on String structs.
/// Turn CSV text into rows of columns: split on "\n" for rows, ";" for columns.
func csv(data: String) -> [[String]] {
    var table: [[String]] = []
    data.components(separatedBy: "\n").forEach { row in
        table.append(row.components(separatedBy: ";"))
    }
    return table
}
Then you can access any value via:
var data = readDataFromCSV(fileName: kCSVFileName, fileType: kCSVFileExtension)
data = cleanRows(file: data)
let csvRows = csv(data: data)
print(csvRows[1][1]) //UXM n. 166/167.
Swift 4
Sometimes a CSV file is more complicated: values may contain special characters (e.g. a comma) and be surrounded by double quotes, as in this example:
Hello, "Complicated String, with a comma inside", 123
In this case, I use:
// Parse `data` (CSV bytes) into 3-tuples, honoring double-quoted fields that
// may contain commas. Quoted fields are consumed with a Scanner; plain lines
// are split on ','.
// NOTE(review): `scanLocation`/`scanUpTo(_:into:)` were deprecated in iOS 13 —
// see the Swift 5 version (parseCSV) further down for the replacement API.
let dataString: String! = String.init(data: data!, encoding: .utf8)
var items: [(String, String, String)] = []
let lines: [String] = dataString.components(separatedBy: NSCharacterSet.newlines) as [String]
for line in lines {
    var values: [String] = []
    if line != "" {
        if line.range(of: "\"") != nil {
            var textToScan:String = line
            var value:NSString?
            var textScanner:Scanner = Scanner(string: textToScan)
            while textScanner.string != "" {
                // A leading quote means: skip it, scan to the closing quote.
                if (textScanner.string as NSString).substring(to: 1) == "\"" {
                    textScanner.scanLocation += 1
                    textScanner.scanUpTo("\"", into: &value)
                    textScanner.scanLocation += 1
                } else {
                    textScanner.scanUpTo(",", into: &value)
                }
                values.append(value! as String)
                // Re-seed the scanner with whatever follows the delimiter.
                if textScanner.scanLocation < textScanner.string.count {
                    textToScan = (textScanner.string as NSString).substring(from: textScanner.scanLocation + 1)
                } else {
                    textToScan = ""
                }
                textScanner = Scanner(string: textToScan)
            }
        } else {
            values = line.components(separatedBy: ",")
        }
        let item = (values[0], values[1], values[2])
        items.append(item)
        // Bug fix: tuples are zero-indexed — the original printed
        // item.1/item.2/item.3, and `.3` does not exist on a 3-tuple
        // (compile error). Print all three elements instead.
        print(item.0)
        print(item.1)
        print(item.2)
    }
}
It is just written in Swift 4, the original is from https://www.appcoda.com/core-data-preload-sqlite-database/
Starting from iOS15 there is a new Official framework called TabularData, try to use it.
import TabularData
let url = Bundle.main.url(forResource: "csvFileName", withExtension: "csv")!
let result = try? DataFrame(contentsOfCSVFile: url)
print(result)
Then you can convert them into the data model you need
More Detail About TabularData(WWDC)
Swift 5.0
.scanLocaion and .scanUpTo() were deprecated in iOS13. Here's a working version of Chhaileng's answer.
/// Return the raw contents of a bundled file, or nil when it is missing or
/// cannot be decoded as UTF-8.
func openCSV(fileName:String, fileType: String)-> String!{
    guard let filepath = Bundle.main.path(forResource: fileName, ofType: fileType) else {
        return nil
    }
    do {
        return try String(contentsOfFile: filepath, encoding: .utf8)
    } catch {
        print("File Read Error for file \(filepath)")
        return nil
    }
}
// Parse the bundled "MeislinDemo.csv" into 3-tuples, handling double-quoted
// fields that may contain commas. This is the iOS 13+ rewrite of the Swift 4
// Scanner code above, using scanUpToString/currentIndex instead of the
// deprecated scanUpTo(_:into:)/scanLocation.
// NOTE(review): crashes (force unwrap) if the file is missing or a row has
// fewer than 3 fields — confirm the fixture guarantees both.
func parseCSV(){
let dataString: String! = openCSV(fileName: "MeislinDemo", fileType: "csv")
var items: [(String, String, String)] = []
let lines: [String] = dataString.components(separatedBy: NSCharacterSet.newlines) as [String]
for line in lines {
var values: [String] = []
if line != "" {
// Lines containing a quote go through the Scanner path.
if line.range(of: "\"") != nil {
var textToScan:String = line
var value:String?
var textScanner:Scanner = Scanner(string: textToScan)
while !textScanner.isAtEnd {
// A leading quote: step over it, take everything up to the closing
// quote, then step over that too.
if (textScanner.string as NSString).substring(to: 1) == "\"" {
textScanner.currentIndex = textScanner.string.index(after: textScanner.currentIndex)
value = textScanner.scanUpToString("\"")
textScanner.currentIndex = textScanner.string.index(after: textScanner.currentIndex)
} else {
value = textScanner.scanUpToString(",")
}
values.append(value! as String)
// Re-seed the scanner with the text after the delimiter just consumed.
if !textScanner.isAtEnd{
let indexPlusOne = textScanner.string.index(after: textScanner.currentIndex)
textToScan = String(textScanner.string[indexPlusOne...])
} else {
textToScan = ""
}
textScanner = Scanner(string: textToScan)
}
// For a line without double quotes, we can simply separate the string
// by using the delimiter (e.g. comma)
} else {
values = line.components(separatedBy: ",")
}
// Put the values into the tuple and add it to the items array
let item = (values[0], values[1], values[2])
items.append(item)
print(item.0)
print(item.1)
print(item.2)
}
}
}

This is for CSV file for swift 4.2
// Load "file.csv" from the app bundle into [[String]]: rows split on CRLF,
// columns on ';'. dataArray stays empty when the file is missing or unreadable.
var dataArray = [[String]]()
if let path = Bundle.main.path(forResource: "file", ofType: "csv") {
    dataArray = []
    do {
        let raw = try Data(contentsOf: URL(fileURLWithPath: path))
        if let rows = String(data: raw, encoding: .utf8)?
            .components(separatedBy: "\r\n")
            .map({ $0.components(separatedBy: ";") }) {
            dataArray.append(contentsOf: rows)
        }
    } catch let jsonErr {
        print("\n Error reading CSV file: \n ", jsonErr)
    }
}