Mercurial repository log including subrepositories

In our build scripts I use the templated hg log command to get a changelist for a particular build. This works great, but unfortunately it does not include changes in the subrepos (we have more than 10 of them). I would like to include them too, but there seems to be no such command.
I'm thinking about writing a script that:
Reads the .hgsubstate file at the starting revision
Finds out the subrepos and their starting revisions
Runs hg log for them
Merges and sorts the results by date.
Is there a simpler way? Maybe a command I'm missing?

Since this has apparently been on the todo list for years and is otherwise unavailable, I wrote my own subtools.py:
from __future__ import print_function
import hglib
import os
import sys

def get_substate(client, rev=None):
    # Parse .hgsubstate (lines of "<revision> <path>") into {path: revision}.
    substate_filename = os.path.join(client.root(), '.hgsubstate')
    if os.path.isfile(substate_filename):
        lines = client.cat([substate_filename], rev).split('\n')
        return {key: value for (value, key) in [line.split() for line in lines if line]}
    else:
        return {}

def substate_diff(client, revA, revB):
    substate_a = get_substate(client, revA)
    substate_b = get_substate(client, revB)
    key_union = set(substate_b) | set(substate_a)
    diff = {}
    for key in key_union:
        # Use .get() so subrepos added or removed between the two
        # revisions show up as None instead of raising KeyError.
        diff[key] = (substate_a.get(key), substate_b.get(key))
    return diff

def recursive_log(path, revA, revB):
    logtree = {'logs': [], 'subrepos': {}}
    try:
        client = hglib.open(path)
    except hglib.error.ServerError:
        # Not a repository (or it cannot be opened); return an empty tree.
        return logtree
    if revA == revB:
        print("no changes on {}".format(client.root()))
        return logtree
    print("Checking {} between {} and {}...".format(client.root(), revA, revB))
    for key, revisions in substate_diff(client, revA, revB).items():
        if revisions[0] and revisions[1]:
            logtree['subrepos'][key] = recursive_log(
                os.path.join(client.root(), key), revisions[0], revisions[1])
        elif revisions[0] and not revisions[1]:
            print("removed subrepo with path: %s" % key, file=sys.stderr)
        elif revisions[1] and not revisions[0]:
            print("added subrepo with path: %s" % key, file=sys.stderr)
    logtree['logs'] = client.log("%s:%s" % (revA, revB))
    return logtree
Use it like this:
In [15]: log = subtools.recursive_log('./', '947', '951')
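To get the flat, date-sorted changelist the question asks for (step 4 of the plan), a small helper can walk the returned tree. This is only a sketch built on the logtree structure above; it assumes hglib's log entries, which are named tuples with rev, desc and date fields:

def flatten_log(logtree):
    # Collect this repo's entries plus those of all subrepos, recursively.
    entries = list(logtree['logs'])
    for subtree in logtree['subrepos'].values():
        entries.extend(flatten_log(subtree))
    return entries

log = subtools.recursive_log('./', '947', '951')
for entry in sorted(flatten_log(log), key=lambda e: e.date):
    print(entry.rev, entry.date, entry.desc)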

Python Google Drive API file-delete() method broken

I cannot get the Google Drive files().delete() method to work via the Python API; it appears to be broken.
Here is some info about my setup:
Ubuntu 16.04
Python 3.5.2 (default, Nov 12 2018, 13:43:14)
google-api-python-client (1.7.9)
google-auth (1.6.3)
google-auth-httplib2 (0.0.3)
google-auth-oauthlib (0.3.0)
Below, I list a Python script which can reproduce the bug:
"""
googdrive17.py
This script should delete files named 'hello.txt'
Ref:
https://developers.google.com/drive/api/v3/quickstart/python
https://developers.google.com/drive/api/v3/reference/files
Demo (Ubuntu):
sudo apt install python3-pip
sudo pip3 install --upgrade google-api-python-client
sudo pip3 install --upgrade google-auth-httplib2
sudo pip3 install --upgrade google-auth-oauthlib
python3 googdrive17.py
"""
import pickle
import os.path
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request

# I declare a very permissive scope (for training only):
SCOPES = ['https://www.googleapis.com/auth/drive']

creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first time.
if os.path.exists('token.pickle'):
    with open('token.pickle', 'rb') as fh:
        creds = pickle.load(fh)

# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
            'credentials.json', SCOPES)
        creds = flow.run_local_server()
    # Save the credentials for the next run
    with open('token.pickle', 'wb') as token:
        pickle.dump(creds, token)

# I create a file so I can upload it:
with open('/tmp/hello.txt', 'w') as fh:
    fh.write("hello world\n")

# From my laptop, I upload a file named hello.txt:
drive_service = build('drive', 'v3', credentials=creds)
file_metadata = {'name': 'hello.txt'}
media = MediaFileUpload('/tmp/hello.txt', mimetype='text/plain')
create_response = drive_service.files().create(body=file_metadata,
                                               media_body=media,
                                               fields='id').execute()
file_id = create_response.get('id')
print('new /tmp/hello.txt file_id:')
print(file_id)

# Q: With googleapiclient, how to filter files list()-response?
# A1: https://developers.google.com/drive/api/v3/reference/files/list
# A2: https://developers.google.com/drive/api/v3/search-files
list_response = drive_service.files().list(
    orderBy="createdTime desc",
    q="name='hello.txt'",
    pageSize=22,
    fields="files(id, name)"
).execute()

items = list_response.get('files', [])
if items:
    for item in items:
        print('I will try to delete this file:')
        print(u'{0} ({1})'.format(item['name'], item['id']))
        del_response = drive_service.files().delete(fileId=item['id'])
        print('del_response.body:')
        print(del_response.body)
    print('I will try to emptyTrash:')
    trash_response = drive_service.files().emptyTrash()
    print('trash_response.body:')
    print(trash_response.body)
else:
    print('hello.txt not found in your google-drive account.')
When I run the script I see output similar to that listed below:
$ python3 googdrive17.py
new /tmp/hello.txt file_id:
1m8nKOfIeB0E5t60F_-9bKwIJds8PSvYY
I will try to delete this file:
hello.txt (1m8nKOfIeB0E5t60F_-9bKwIJds8PSvYY)
del_response.body:
None
I will try to delete this file:
hello.txt (1Ow4fcUBgEYUy3ezYScDKlLSMbp-hyOLT)
del_response.body:
None
I will try to delete this file:
hello.txt (1TiUrLgQdY1Cb9w0UWHjnmj7HZBaFsKcp)
del_response.body:
None
I will try to emptyTrash:
trash_response.body:
None
$
I see that two of the API calls work well:
files.list()
files.create()
Two calls appear broken:
files.delete()
files.emptyTrash()
Perhaps, though, I call them incorrectly?
How about this modification?
First, the official documentation for the Files: delete method and the Files: emptyTrash method says the following.
If successful, this method returns an empty response body.
This means that when the file is deleted and the trash is emptied, the returned del_response and trash_response are empty.
Modified script:
From your question, I understood that files.list() and files.create() work. So I would like to propose modifications for files.delete() and files.emptyTrash(). Please modify your script as follows.
From:
for item in items:
    print('I will try to delete this file:')
    print(u'{0} ({1})'.format(item['name'], item['id']))
    del_response = drive_service.files().delete(fileId=item['id'])
    print('del_response.body:')
    print(del_response.body)
print('I will try to emptyTrash:')
trash_response = drive_service.files().emptyTrash()
print('trash_response.body:')
print(trash_response.body)
To:
for item in items:
    print('I will try to delete this file:')
    print(u'{0} ({1})'.format(item['name'], item['id']))
    del_response = drive_service.files().delete(fileId=item['id']).execute()  # Modified
    print('del_response.body:')
    print(del_response)
print('I will try to emptyTrash:')
trash_response = drive_service.files().emptyTrash().execute()  # Modified
print('trash_response.body:')
print(trash_response)
execute() was added for drive_service.files().delete() and drive_service.files().emptyTrash().
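The underlying pattern is that googleapiclient builds lazy request objects: files().delete() only constructs an HttpRequest, and nothing goes over the wire until execute() is called on it. A minimal sketch, reusing drive_service and file_id from the script above:

request = drive_service.files().delete(fileId=file_id)
# Nothing has been sent yet; 'request' is just an HttpRequest object.
del_response = request.execute()  # the HTTP DELETE actually happens here
print(del_response)  # empty on success, per the reference docs below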
References:
Files: delete
Files: emptyTrash
If this was not the result you want, I apologize.

Local File from repository not saved after edit (groovy / jenkins)

In my Jenkins job I checked out a repository. In that repo, there is a file which I want to edit during the job, but it seems that the file is not saved. I have a method like this:
def updateFile(id, key){
    def inputFile = readFile("${workspace}/config/cnf.json")
    def inputJSON = new JsonSlurper().parseText(inputFile)
    inputJSON."${key}"[0].pref = "${id}"
    def result = JsonOutput.toJson(inputJSON)
    //here it is changed.
    println "result:\n${result}"
    inputFile << "${JsonOutput.prettyPrint(result)}"
    //and now it is again the old one.
    println "Here: \n ${inputFile}"
}
The problem is that I can't use "new File" with ".write" or ".append", because Jenkins can't find the file that way:
def inputFile = new File("${workspace}/config/cnf.json") // --> no file found
Is there any good way to save the existing file?
If readFile("${workspace}/config/cnf.json") works fine, then use the writeFile step to write the file, like this:
writeFile file: "${workspace}/config/cnf.json", text: result

Pipeline doesn't write to MySQL but also gives no error

I've tried to implement this pipeline in my spider.
After installing the necessary dependencies, I am able to run the spider without any errors, but for some reason it doesn't write to my database.
I'm pretty sure something is going wrong when connecting to the database. When I enter a wrong password, I still don't get any error.
When the spider has scraped all the data, it takes a few minutes before it starts dumping the stats.
2017-08-31 13:17:12 [scrapy] INFO: Closing spider (finished)
2017-08-31 13:17:12 [scrapy] INFO: Stored csv feed (27 items) in: test.csv
2017-08-31 13:24:46 [scrapy] INFO: Dumping Scrapy stats:
Pipeline:
import MySQLdb.cursors
from twisted.enterprise import adbapi
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.utils.project import get_project_settings
from scrapy import log

SETTINGS = {}
SETTINGS['DB_HOST'] = 'mysql.domain.com'
SETTINGS['DB_USER'] = 'username'
SETTINGS['DB_PASSWD'] = 'password'
SETTINGS['DB_PORT'] = 3306
SETTINGS['DB_DB'] = 'database_name'

class MySQLPipeline(object):

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.stats)

    def __init__(self, stats):
        print "init"
        # Instantiate DB connection pool
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
            host=SETTINGS['DB_HOST'],
            user=SETTINGS['DB_USER'],
            passwd=SETTINGS['DB_PASSWD'],
            port=SETTINGS['DB_PORT'],
            db=SETTINGS['DB_DB'],
            charset='utf8',
            use_unicode=True,
            cursorclass=MySQLdb.cursors.DictCursor
        )
        self.stats = stats
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def spider_closed(self, spider):
        print "close"
        """ Cleanup function, called after crawling has finished to close open
        objects.
        Close ConnectionPool. """
        self.dbpool.close()

    def process_item(self, item, spider):
        print "process"
        query = self.dbpool.runInteraction(self._insert_record, item)
        query.addErrback(self._handle_error)
        return item

    def _insert_record(self, tx, item):
        print "insert"
        result = tx.execute(
            " INSERT INTO matches(type,home,away,home_score,away_score) VALUES (soccer,"+item["home"]+","+item["away"]+","+item["score"].explode("-")[0]+","+item["score"].explode("-")[1]+")"
        )
        if result > 0:
            self.stats.inc_value('database/items_added')

    def _handle_error(self, e):
        print "error"
        log.err(e)
Spider:
import scrapy
import dateparser
from crawling.items import KNVBItem

class KNVBspider(scrapy.Spider):
    name = "knvb"
    start_urls = [
        'http://www.knvb.nl/competities/eredivisie/uitslagen',
    ]
    custom_settings = {
        'ITEM_PIPELINES': {
            'crawling.pipelines.MySQLPipeline': 301,
        }
    }

    def parse(self, response):
        # www.knvb.nl/competities/eredivisie/uitslagen
        for row in response.xpath('//div[@class="table"]'):
            for div in row.xpath('./div[@class="row"]'):
                match = KNVBItem()
                match['home'] = div.xpath('./div[@class="value home"]/div[@class="team"]/text()').extract_first()
                match['away'] = div.xpath('./div[@class="value away"]/div[@class="team"]/text()').extract_first()
                match['score'] = div.xpath('./div[@class="value center"]/text()').extract_first()
                match['date'] = dateparser.parse(div.xpath('./preceding-sibling::div[@class="header"]/span/span/text()').extract_first(), languages=['nl']).strftime("%d-%m-%Y")
                yield match
If there are better pipelines available to do what I'm trying to achieve, that'd be welcome as well. Thanks!
Update:
With the link provided in the accepted answer I eventually got to this function that's working (and thus solved my problem):
def process_item(self, item, spider):
    print "process"
    query = self.dbpool.runInteraction(self._insert_record, item)
    query.addErrback(self._handle_error)
    query.addBoth(lambda _: item)
    return query
Take a look at this for how to use adbapi with MySQL for saving scraped items. Note the difference between your process_item and their process_item method implementation. While you return the item immediately, they return the Deferred object produced by the runInteraction method, which yields the item upon its completion. I think this is the reason your _insert_record never gets called.
If you can see the insert in your output, that's already a good sign.
I'd rewrite the insert function this way:
def _insert_record(self, tx, item):
    print "insert"
    raw_sql = "INSERT INTO matches(type,home,away,home_score,away_score) VALUES ('%s', '%s', '%s', '%s', '%s')"
    # Note: Python strings use split(), not PHP's explode().
    sql = raw_sql % ('soccer', item['home'], item['away'], item['score'].split('-')[0], item['score'].split('-')[1])
    print sql
    result = tx.execute(sql)
    if result > 0:
        self.stats.inc_value('database/items_added')
It allows you to debug the SQL you're using. In your version you're not wrapping the string values in quotes, which is a syntax error in MySQL.
I'm not sure about your last values (the score), so I treated them as strings.
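As a side note, the transaction object that runInteraction passes to _insert_record behaves like a DB-API cursor, so its execute() also accepts a parameter tuple and lets the MySQLdb driver do the quoting and escaping. A sketch of that variant, assuming the same item fields as above:

def _insert_record(self, tx, item):
    # Parameterized query: the driver quotes and escapes the values,
    # which also protects against SQL injection.
    tx.execute(
        "INSERT INTO matches(type,home,away,home_score,away_score) "
        "VALUES (%s, %s, %s, %s, %s)",
        ('soccer', item['home'], item['away'],
         item['score'].split('-')[0], item['score'].split('-')[1])
    )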

Dynamically create REST test steps in groovy

I have some JSON files that I want to use as input. Let's say I have these folders:
mainFolder --> Folder 1 : 10 JSON files (req)
           --> Folder 2 : 10 JSON files (req)
I want to create test items from these folders: each directory becomes a testCase and each file becomes a testStep.
Here's my code:
import com.eviware.soapui.impl.wsdl.teststeps.registry.GroovyScriptStepFactory
import com.eviware.soapui.support.UISupport;
import com.eviware.soapui.impl.wsdl.teststeps.registry.RestRequestStepFactory
import com.eviware.soapui.config.TestStepConfig
import com.eviware.soapui.impl.rest.*;

def myTestCase = context.testCase
log.info myTestCase
def projectPath = Path
def endPoint = "anEndPoint";

def addTestStep(operation, requestFile, testCase, projectPath, endpoint){
    def usageId = requestFile.name.replace("_request.json","")
    def projectPathTest = projectPath+"SPecificPath";
    def testStepName = usageId;
    def iface = testCase.testSuite.project.getInterfaceList()[0];
    def operationName = operation;
    def op = iface.operations[operationName];
    def config = com.eviware.soapui.impl.wsdl.teststeps.registry.RestRequestStepFactory.createConfig( op, testStepName);
    def newTestStep = testCase.addTestStep( config );
    newTestStep.getTestRequest().setRequestContent(requestFile.text)
    newTestStep.httpRequest.endpoint = endpoint
}

if ( com.eviware.soapui.support.UISupport.confirm("Reconstruct ?","Confirm") ){
    testSuite.getTestCaseList().each{ testCase -> testSuite.removeTestCase(testCase) }
    new File(projectPathTest).eachDir{ dir ->
        operation = dir.name
        def RestTestCase = testSuite.addNewTestCase(operation)
        RestTestCase.setFailOnError(false)
        dir.eachFileMatch(~/.*_request\.json/){ file ->
            addTestStep(operation, file, RestTestCase, projectPath, endPoint)
        }
    }
}
I have verified this many times, against many pages and forums, and it seems that I have the correct form and algorithm for what I want. I succeed in creating testCases with the names of the folders, and I succeed in reading the request JSON files, but I fail to create the test steps. I'm pretty sure it's either the config or the interface/operation that makes it fail:
def config = com.eviware.soapui.impl.wsdl.teststeps.registry.RestRequestStepFactory.createConfig( op, testStepName);
Any help please?

phpstorm mercurial pre commit hook not executed

I have this pre-commit hook set up in .hg/hgrc
[hooks]
pre-commit = python:commit.py:run
commit.py
#!/usr/bin/env python
import os
import time

def run(ui, repo, **kwargs):
    datestring = time.strftime('%d-%m-%y %H:%M')
    filecontent = 'span.project-date-content:after {content:"'+datestring+'"}'
    with open('css/projectdate.css', 'w') as f:
        f.write(filecontent)
    return False
This works exactly as expected when I commit from the command line; however, when I commit from within PhpStorm, the projectdate.css file does not get updated.
How can I get PhpStorm to execute my (or any) pre-commit hook before committing?